text
stringlengths 4
1.02M
| meta
dict |
|---|---|
# Minimal Django settings module, presumably for running the adminplus
# test suite (ROOT_URLCONF points at a test-only urlconf).
INSTALLED_APPS = (
    'django.contrib.sessions',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.auth',
    'django.contrib.admin',
    'adminplus',
)

# Not a real secret; only needed so Django will start.
SECRET_KEY = 'adminplus'

# Throwaway on-disk SQLite database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    },
}

ROOT_URLCONF = 'test_urlconf'

# No middleware is installed for these tests.
MIDDLEWARE_CLASSES = ()
|
{
"content_hash": "8a2dfd35d570c7cf7bd9f0d39634aadd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 47,
"avg_line_length": 19.35,
"alnum_prop": 0.6098191214470284,
"repo_name": "drdaeman/django-adminplus",
"id": "de4d0a1199c0aad874bf157f1454f7516135a2a6",
"size": "387",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "731"
},
{
"name": "Python",
"bytes": "9431"
},
{
"name": "Shell",
"bytes": "463"
}
],
"symlink_target": ""
}
|
"""
iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
import six
from cinder.i18n import _LI
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@interface.volumedriver
class FJDXISCSIDriver(driver.ISCSIDriver):
    """iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.

    All array-side work is delegated to
    ``eternus_dx_common.FJDXCommon``; this class adapts it to the
    Cinder driver interface and adds enter/exit logging.
    """

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Fujitsu_ETERNUS_CI"
    VERSION = eternus_dx_common.FJDXCommon.VERSION

    def __init__(self, *args, **kwargs):
        super(FJDXISCSIDriver, self).__init__(*args, **kwargs)
        self.common = eternus_dx_common.FJDXCommon(
            'iSCSI',
            configuration=self.configuration)
        # Report the backend library's version, not the class default.
        self.VERSION = self.common.VERSION

    @staticmethod
    def _merge_user_metadata(volume, metadata):
        """Merge user-supplied volume metadata into *metadata* in place.

        Prefers the ``volume_metadata`` list of key/value records when
        present; otherwise falls back to the plain ``metadata`` dict on
        the volume. Returns *metadata* for convenience. (This logic was
        previously duplicated in the three create_* methods.)
        """
        v_metadata = volume.get('volume_metadata')
        if v_metadata:
            for data in v_metadata:
                metadata[data['key']] = data['value']
        else:
            metadata.update(volume.get('metadata', {}))
        return metadata

    def check_for_setup_error(self):
        # No eager validation; FJDXCommon raises on first use instead.
        return

    def create_volume(self, volume):
        """Create volume."""
        LOG.info(_LI('create_volume, '
                     'volume id: %s, Enter method.'), volume['id'])

        element_path, metadata = self.common.create_volume(volume)
        self._merge_user_metadata(volume, metadata)

        LOG.info(_LI('create_volume, info: %s, Exit method.'), metadata)
        return {'provider_location': six.text_type(element_path),
                'metadata': metadata}

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        LOG.info(_LI('create_volume_from_snapshot, '
                     'volume id: %(vid)s, snap id: %(sid)s, Enter method.'),
                 {'vid': volume['id'], 'sid': snapshot['id']})

        element_path, metadata = (
            self.common.create_volume_from_snapshot(volume, snapshot))
        self._merge_user_metadata(volume, metadata)

        LOG.info(_LI('create_volume_from_snapshot, '
                     'info: %s, Exit method.'), metadata)
        return {'provider_location': six.text_type(element_path),
                'metadata': metadata}

    def create_cloned_volume(self, volume, src_vref):
        """Create cloned volume."""
        LOG.info(_LI('create_cloned_volume, '
                     'target volume id: %(tid)s, '
                     'source volume id: %(sid)s, Enter method.'),
                 {'tid': volume['id'], 'sid': src_vref['id']})

        element_path, metadata = (
            self.common.create_cloned_volume(volume, src_vref))
        self._merge_user_metadata(volume, metadata)

        LOG.info(_LI('create_cloned_volume, '
                     'info: %s, Exit method.'), metadata)
        return {'provider_location': six.text_type(element_path),
                'metadata': metadata}

    def delete_volume(self, volume):
        """Delete volume on ETERNUS."""
        LOG.info(_LI('delete_volume, '
                     'volume id: %s, Enter method.'), volume['id'])

        vol_exist = self.common.delete_volume(volume)

        LOG.info(_LI('delete_volume, '
                     'delete: %s, Exit method.'), vol_exist)
        return

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        LOG.info(_LI('create_snapshot, '
                     'snap id: %(sid)s, volume id: %(vid)s, Enter method.'),
                 {'sid': snapshot['id'], 'vid': snapshot['volume_id']})

        element_path, metadata = self.common.create_snapshot(snapshot)

        LOG.info(_LI('create_snapshot, info: %s, Exit method.'), metadata)
        return {'provider_location': six.text_type(element_path)}

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.info(_LI('delete_snapshot, '
                     'snap id: %(sid)s, volume id: %(vid)s, Enter method.'),
                 {'sid': snapshot['id'], 'vid': snapshot['volume_id']})

        vol_exist = self.common.delete_snapshot(snapshot)

        LOG.info(_LI('delete_snapshot, '
                     'delete: %s, Exit method.'), vol_exist)
        return

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        return

    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume."""
        return

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        return

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        LOG.info(_LI('initialize_connection, volume id: %(vid)s, '
                     'initiator: %(initiator)s, Enter method.'),
                 {'vid': volume['id'], 'initiator': connector['initiator']})

        info = self.common.initialize_connection(volume, connector)

        LOG.info(_LI('initialize_connection, '
                     'info: %s, Exit method.'), info)
        return info

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        LOG.info(_LI('terminate_connection, volume id: %(vid)s, '
                     'initiator: %(initiator)s, Enter method.'),
                 {'vid': volume['id'], 'initiator': connector['initiator']})

        map_exist = self.common.terminate_connection(volume, connector)

        LOG.info(_LI('terminate_connection, '
                     'unmap: %s, Exit method.'), map_exist)
        return

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        Stats are refreshed from the backend only when ``refresh`` is
        True; otherwise the cached ``self._stats`` is returned.
        """
        LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh)

        pool_name = None
        if refresh is True:
            data, pool_name = self.common.update_volume_stats()
            backend_name = self.configuration.safe_get('volume_backend_name')
            data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver'
            data['storage_protocol'] = 'iSCSI'
            self._stats = data

        LOG.debug('get_volume_stats, '
                  'pool name: %s, Exit method.', pool_name)
        return self._stats

    def extend_volume(self, volume, new_size):
        """Extend volume."""
        LOG.info(_LI('extend_volume, '
                     'volume id: %s, Enter method.'), volume['id'])

        used_pool_name = self.common.extend_volume(volume, new_size)

        LOG.info(_LI('extend_volume, '
                     'used pool name: %s, Exit method.'), used_pool_name)
|
{
"content_hash": "03b33d51a1651c98fe43c59d918b3b02",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 37.078125,
"alnum_prop": 0.5693215339233039,
"repo_name": "cloudbase/cinder",
"id": "ca878227780b5fda0e321b38fa9c11d7448ab5e2",
"size": "7836",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import logging
from webtest import TestApp
from slamon_afm.app import create_app
from slamon_afm.models import db
# Log everything during tests
logging.basicConfig(level=logging.DEBUG)
class AFMTest(TestCase):
    """Base TestCase that boots the AFM Flask app for each test."""

    # Per-test app configuration; 'sqlite://' is an in-memory SQLite
    # database, so every test starts from a clean slate.
    AFM_CONFIG = {
        'SQLALCHEMY_DATABASE_URI': 'sqlite://'
    }

    def setUp(self):
        # Push the app context first so db.create_all() runs inside it.
        self.app = create_app(config=self.AFM_CONFIG)
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        # WebTest wrapper subclasses use to issue HTTP requests.
        self.test_app = TestApp(self.app)

    def tearDown(self):
        # Drop the schema, then pop the context pushed in setUp().
        db.drop_all()
        self.app_context.pop()
|
{
"content_hash": "efc2ee0bb923cb0db27c9292c3ffb65d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 53,
"avg_line_length": 21.413793103448278,
"alnum_prop": 0.6586151368760065,
"repo_name": "StealthyLoner/slamon-agent-fleet-manager",
"id": "00463067b132d93deea9150b21315438a5aa4938",
"size": "621",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "slamon_afm/tests/afm_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6560"
},
{
"name": "Python",
"bytes": "44821"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.codegen.thrift.java.apache_thrift_java_gen import ApacheThriftJavaGen
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
    """Expose the ``java_thrift_library`` target alias to BUILD files."""
    target_aliases = {'java_thrift_library': JavaThriftLibrary}
    return BuildFileAliases(targets=target_aliases)
def register_goals():
    """Install the Apache Thrift Java generator task under the ``gen`` goal."""
    thrift_java = task(name='thrift-java', action=ApacheThriftJavaGen)
    thrift_java.install('gen')
|
{
"content_hash": "bb0ab924d2fa83749d0154dba59d92d6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.78125,
"repo_name": "twitter/pants",
"id": "a6d5d929c22bf092ad9ce64f8c39c8ded7c2b555",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/codegen/thrift/java/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
from indy_client.test import waits
from stp_core.loop.eventually import eventually
from anoncreds.protocol.types import SchemaKey, ID
def test_send_proof_works(aliceAgent, aliceAcceptedFaber, aliceAcceptedAcme,
                          acmeAgent, emptyLooper):
    """End-to-end anoncreds flow: Alice gets a claim from Faber, then
    sends a proof to Acme, which must verify it."""
    # 1. request Claims from Faber
    faberLink = aliceAgent.wallet.getConnection('Faber College')
    name, version, origin = faberLink.availableClaims[0]
    schemaKey = SchemaKey(name, version, origin)
    aliceAgent.sendReqClaim(faberLink, schemaKey)

    # 2. check that claim is received from Faber
    async def chkClaims():
        claim = await aliceAgent.prover.wallet.getClaimSignature(ID(schemaKey))
        assert claim.primaryClaim

    emptyLooper.run(eventually(
        chkClaims, timeout=waits.expectedClaimsReceived()))

    # 3. send Proof Request to Alice
    alice_link = acmeAgent.wallet.getConnection('Alice')
    acmeAgent.sendProofReq(alice_link, 'Job-Application-v0.3')

    # NOTE(review): the proof request above is named 'Job-Application-v0.3'
    # while the lookups below use "Job-Application-2" — confirm that this
    # name mapping is intentional on the agent side.
    def chkProofRequest():
        assert len(aliceAgent.wallet.getMatchingConnectionsWithProofReq(
            "Job-Application-2", "Acme Corp")) > 0

    emptyLooper.run(eventually(chkProofRequest,
                               timeout=waits.expectedClaimsReceived()))

    # 4. send proof to Acme
    acme_link, acme_proof_req = aliceAgent.wallet.getMatchingConnectionsWithProofReq(
        "Job-Application-2", "Acme Corp")[0]
    aliceAgent.sendProof(acme_link, acme_proof_req)

    # 5. check that proof is verified by Acme
    def chkProof():
        internalId = acmeAgent.get_internal_id_by_nonce(
            acme_link.request_nonce)
        link = acmeAgent.wallet.getConnectionBy(internalId=internalId)
        assert "Job-Application-2" in link.verifiedClaimProofs

    emptyLooper.run(eventually(
        chkProof, timeout=waits.expectedClaimsReceived()))
|
{
"content_hash": "1d9c1ed74f37df313a0aeeb3a12d2c13",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 85,
"avg_line_length": 39.869565217391305,
"alnum_prop": 0.7050163576881134,
"repo_name": "TechWritingWhiz/indy-node",
"id": "512576c79a8e6ee73f1a8b40e7afa42ec9455627",
"size": "1834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indy_client/test/agent/test_anoncreds_send_proof_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3131"
},
{
"name": "Groovy",
"bytes": "8886"
},
{
"name": "Makefile",
"bytes": "2073"
},
{
"name": "Python",
"bytes": "1283603"
},
{
"name": "Ruby",
"bytes": "65411"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "127911"
}
],
"symlink_target": ""
}
|
from test import test_banter, test_config, test_crucible, test_dict2xml, test_patch, test_utils
|
{
"content_hash": "5740e2dd16f98c40d443e47c9e5ce209",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 95,
"avg_line_length": 96,
"alnum_prop": 0.7916666666666666,
"repo_name": "spookylukey/banter",
"id": "d3fab0fa1b3db5ac046aa87eae81ae5d926d4ce8",
"size": "96",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25406"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
}
|
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib
import tensorflow as tf
# Data sets: local cache file names and their download locations.
IRIS_TRAINING = 'iris_training.csv'
IRIS_TRAINING_URL = 'http://download.tensorflow.org/data/iris_training.csv'
IRIS_TEST = 'iris_test.csv'
IRIS_TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv'

# Column names for the four iris measurements; the fifth CSV column is
# the integer class label.
FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
def maybe_download_iris_data(file_name, download_url):
    """Download ``file_name`` from ``download_url`` if missing; return row count.

    The first field of the file's first line is expected to hold the total
    number of data rows (the TensorFlow iris CSV convention).
    """
    if not os.path.exists(file_name):
        # `urllib.urlopen` only exists on Python 2; use the Python 3
        # location when available so the example runs on both (the file
        # already imports from __future__ for 2/3 compatibility).
        try:
            from urllib.request import urlopen  # Python 3
        except ImportError:
            from urllib import urlopen  # Python 2
        raw = urlopen(download_url).read()
        # On Python 3 the payload is bytes; decode before writing text.
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        with open(file_name, 'w') as f:
            f.write(raw)

    # The first line is a comma-separated string whose first field is the
    # number of data rows in the file.
    with open(file_name, 'r') as f:
        first_line = f.readline()
    num_elements = first_line.split(',')[0]
    return int(num_elements)
def input_fn(file_name, num_data, batch_size, is_training):
    """Creates an input_fn required by Estimator train/evaluate.

    Args:
      file_name: CSV file to read; its first line is a header and is skipped.
      num_data: total number of data rows, used as the shuffle buffer size.
      batch_size: batch size of the returned dataset.
      is_training: when True, shuffle and repeat indefinitely.

    Returns:
      A zero-argument callable yielding `(features, labels)` tensors.
    """

    def _parse_csv(rows_string_tensor):
        """Takes the string input tensor and returns tuple of (features, labels)."""
        # Last dim is the label.
        num_features = len(FEATURE_KEYS)
        num_columns = num_features + 1
        columns = tf.decode_csv(rows_string_tensor,
                                record_defaults=[[]] * num_columns)
        features = dict(zip(FEATURE_KEYS, columns[:num_features]))
        labels = tf.cast(columns[num_features], tf.int32)
        return features, labels

    def _input_fn():
        """The input_fn."""
        dataset = tf.data.TextLineDataset([file_name])
        # Skip the first line (which does not have data).
        dataset = dataset.skip(1)
        dataset = dataset.map(_parse_csv)

        if is_training:
            # For this small dataset, which can fit into memory, to achieve true
            # randomness, the shuffle buffer size is set as the total number of
            # elements in the dataset.
            dataset = dataset.shuffle(num_data)
            dataset = dataset.repeat()

        dataset = dataset.batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels

    return _input_fn
def main(unused_argv):
    """Train and evaluate a DNNClassifier on the Iris data sets."""
    tf.logging.set_verbosity(tf.logging.INFO)

    # Fetch the data sets (downloads on first run) and their row counts.
    num_training_data = maybe_download_iris_data(
        IRIS_TRAINING, IRIS_TRAINING_URL)
    num_test_data = maybe_download_iris_data(IRIS_TEST, IRIS_TEST_URL)

    # Build 3 layer DNN with 10, 20, 10 units respectively.
    feature_columns = [
        tf.feature_column.numeric_column(key, shape=1) for key in FEATURE_KEYS]
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

    # Train.
    train_input_fn = input_fn(IRIS_TRAINING, num_training_data, batch_size=32,
                              is_training=True)
    classifier.train(input_fn=train_input_fn, steps=400)

    # Eval.
    test_input_fn = input_fn(IRIS_TEST, num_test_data, batch_size=32,
                             is_training=False)
    scores = classifier.evaluate(input_fn=test_input_fn)
    print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
# Run under the TensorFlow app harness, which parses flags and calls main().
if __name__ == '__main__':
    tf.app.run()
|
{
"content_hash": "20c3323d145a3ae96b6b84a9b2e4f6d2",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 33.39,
"alnum_prop": 0.6744534291704103,
"repo_name": "ArtsiomCh/tensorflow",
"id": "0a50b3ba87d70a58794bc35009dc76de2cb71d1e",
"size": "3954",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/examples/learn/iris.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "191705"
},
{
"name": "C++",
"bytes": "28812426"
},
{
"name": "CMake",
"bytes": "637222"
},
{
"name": "Go",
"bytes": "960638"
},
{
"name": "Java",
"bytes": "407004"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38120"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "265074"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25375775"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "372058"
}
],
"symlink_target": ""
}
|
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits,
TraitedSpec, File, StdOutCommandLine,
StdOutCommandLineInputSpec, isdefined)
from nipype.utils.filemanip import split_filename
class Image2VoxelInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``image2voxel`` command."""
    in_file = File(exists=True, argstr='-4dimage %s',
                   mandatory=True, position=1,
                   desc='4d image file')
    # TODO convert list of files on the fly
    # imagelist = File(exists=True, argstr='-imagelist %s',
    #                  mandatory=True, position=1,
    #                  desc='Name of a file containing a list of 3D images')
    #
    # imageprefix = traits.Str(argstr='-imageprefix %s', position=3,
    #                          desc='Path to prepend onto filenames in the imagelist.')
    out_type = traits.Enum("float", "char", "short", "int", "long", "double", argstr='-outputdatatype %s', position=2,
                           desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', usedefault=True)
class Image2VoxelOutputSpec(TraitedSpec):
    """Output specification for Image2Voxel."""
    voxel_order = File(exists=True, desc='path/name of 4D volume in voxel order')
class Image2Voxel(StdOutCommandLine):
    """Convert Analyze / NIFTI / MHA files to Camino voxel order.

    Converts scanner-order data in a supported image format to
    voxel-order data. Either takes a 4D file (all measurements in a
    single image) or a list of 3D images.

    Examples
    --------
    >>> import nipype.interfaces.camino as cmon
    >>> img2vox = cmon.Image2Voxel()
    >>> img2vox.inputs.in_file = '4d_dwi.nii'
    >>> img2vox.run() # doctest: +SKIP
    """
    _cmd = 'image2voxel'
    input_spec = Image2VoxelInputSpec
    output_spec = Image2VoxelOutputSpec

    def _list_outputs(self):
        """Map the generated file name onto the ``voxel_order`` output."""
        results = self.output_spec().get()
        results['voxel_order'] = os.path.abspath(self._gen_outfilename())
        return results

    def _gen_outfilename(self):
        """Derive ``<basename>.B<out_type>`` from the input file name."""
        _, stem, _ = split_filename(self.inputs.in_file)
        return stem + '.B' + self.inputs.out_type
class FSL2SchemeInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``fsl2scheme`` command."""
    bvec_file = File(exists=True, argstr='-bvecfile %s',
                     mandatory=True, position=1,
                     desc='b vector file')
    bval_file = File(exists=True, argstr='-bvalfile %s',
                     mandatory=True, position=2,
                     desc='b value file')
    numscans = traits.Int(argstr='-numscans %d', units='NA',
                          desc="Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session.")
    interleave = traits.Bool(argstr='-interleave', desc="Interleave repeated scans. Only used with -numscans.")
    # NOTE(review): bscale is a Float but its argstr uses %d — presumably
    # should be %f; confirm against the fsl2scheme manual.
    bscale = traits.Float(argstr='-bscale %d', units='NA',
                          desc="Scaling factor to convert the b-values into different units. Default is 10^6.")
    diffusiontime = traits.Float(argstr = '-diffusiontime %f', units = 'NA',
                                 desc="Diffusion time")
    flipx = traits.Bool(argstr='-flipx', desc="Negate the x component of all the vectors.")
    flipy = traits.Bool(argstr='-flipy', desc="Negate the y component of all the vectors.")
    flipz = traits.Bool(argstr='-flipz', desc="Negate the z component of all the vectors.")
    usegradmod = traits.Bool(argstr='-usegradmod', desc="Use the gradient magnitude to scale b. This option has no effect if your gradient directions have unit magnitude.")
class FSL2SchemeOutputSpec(TraitedSpec):
    """Output specification for FSL2Scheme."""
    scheme = File(exists=True, desc='Scheme file')
class FSL2Scheme(StdOutCommandLine):
    """Convert b-vectors and b-values from FSL format to a Camino scheme file.

    Examples
    --------
    >>> import nipype.interfaces.camino as cmon
    >>> makescheme = cmon.FSL2Scheme()
    >>> makescheme.inputs.bvec_file = 'bvecs'
    >>> makescheme.inputs.bval_file = 'bvals'
    >>> makescheme.run() # doctest: +SKIP
    """
    # Fixed doctest above: the original assigned 'bvals' to bvec_file a
    # second time, overwriting the b-vector path and leaving the
    # mandatory bval_file input unset.
    _cmd = 'fsl2scheme'
    input_spec = FSL2SchemeInputSpec
    output_spec = FSL2SchemeOutputSpec

    def _list_outputs(self):
        """Expose the generated scheme file as the ``scheme`` output."""
        outputs = self.output_spec().get()
        outputs['scheme'] = os.path.abspath(self._gen_outfilename())
        return outputs

    def _gen_outfilename(self):
        """Name the scheme file after the b-vector file basename."""
        _, name, _ = split_filename(self.inputs.bvec_file)
        return name + '.scheme'
class VtkStreamlinesInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``vtkstreamlines`` command."""
    inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True)
    # The tract data is supplied on stdin via shell redirection ('< file').
    in_file = File(exists=True, argstr=' < %s',
                   mandatory=True, position=-2,
                   desc='data file')
    voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm',
                            argstr='-voxeldims %s', minlen=3, maxlen=3, position=4,
                            units='mm')
    seed_file = File(exists=False, argstr='-seedfile %s', position=1,
                     desc='image containing seed points')
    target_file = File(exists=False, argstr='-targetfile %s', position=2,
                       desc='image containing integer-valued target regions')
    scalar_file = File(exists=False, argstr='-scalarfile %s', position=3,
                       desc='image that is in the same physical space as the tracts')
    colourorient = traits.Bool(argstr='-colourorient', desc="Each point on the streamline is coloured by the local orientation.")
    interpolatescalars = traits.Bool(argstr='-interpolatescalars', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation")
    # NOTE(review): 'interpolate' carries the same description as
    # 'interpolatescalars' — confirm both flags are distinct in Camino.
    interpolate = traits.Bool(argstr='-interpolate', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation")
class VtkStreamlinesOutputSpec(TraitedSpec):
    """Output specification for VtkStreamlines."""
    vtk = File(exists=True, desc='Streamlines in VTK format')
class VtkStreamlines(StdOutCommandLine):
    """Convert raw or voxel-format streamlines to VTK polydata.

    Examples
    --------
    >>> import nipype.interfaces.camino as cmon
    >>> vtk = cmon.VtkStreamlines()
    >>> vtk.inputs.in_file = 'tract_data.Bfloat'
    >>> vtk.inputs.voxeldims = [1,1,1]
    >>> vtk.run() # doctest: +SKIP
    """
    _cmd = 'vtkstreamlines'
    input_spec = VtkStreamlinesInputSpec
    output_spec = VtkStreamlinesOutputSpec

    def _list_outputs(self):
        """Expose the VTK polydata file as the ``vtk`` output."""
        results = self.output_spec().get()
        results['vtk'] = os.path.abspath(self._gen_outfilename())
        return results

    def _gen_outfilename(self):
        """Name the output after the input with a ``.vtk`` extension."""
        _, stem, _ = split_filename(self.inputs.in_file)
        return stem + '.vtk'
class ProcStreamlinesInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``procstreamlines`` command."""
    inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True)
    in_file = File(exists=True, argstr='-inputfile %s',
                   mandatory=True, position=1,
                   desc='data file')
    maxtractpoints= traits.Int(argstr='-maxtractpoints %d', units='NA',
                               desc="maximum number of tract points")
    mintractpoints= traits.Int(argstr='-mintractpoints %d', units='NA',
                               desc="minimum number of tract points")
    maxtractlength= traits.Int(argstr='-maxtractlength %d', units='mm',
                               desc="maximum length of tracts")
    mintractlength= traits.Int(argstr='-mintractlength %d', units='mm',
                               desc="minimum length of tracts")
    datadims = traits.List(traits.Int, desc = 'data dimensions in voxels',
                           argstr='-datadims %s', minlen=3, maxlen=3,
                           units='voxels')
    voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm',
                            argstr='-voxeldims %s', minlen=3, maxlen=3,
                            units='mm')
    seedpointmm = traits.List(traits.Int, desc = 'The coordinates of a single seed point for tractography in mm',
                              argstr='-seedpointmm %s', minlen=3, maxlen=3,
                              units='mm')
    seedpointvox = traits.List(traits.Int, desc = 'The coordinates of a single seed point for tractography in voxels',
                               argstr='-seedpointvox %s', minlen=3, maxlen=3,
                               units='voxels')
    seedfile = File(exists=False, argstr='-seedfile %s',
                    desc='Image Containing Seed Points')
    regionindex = traits.Int(argstr='-regionindex %d', units='mm',
                             desc="index of specific region to process")
    # NOTE(review): iterations is a Float but its argstr uses %d — confirm
    # whether a float value is actually accepted by procstreamlines.
    iterations = traits.Float(argstr='-iterations %d', units='NA',
                              desc="Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. The default is 1 if the output is streamlines, and 5000 if the output is connection probability images.")
    targetfile = File(exists=False, argstr='-targetfile %s',
                      desc='Image containing target volumes.')
    allowmultitargets = traits.Bool(argstr='-allowmultitargets', desc="Allows streamlines to connect to multiple target volumes.")
    directional = traits.List(traits.Int, desc = 'Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).',
                              argstr='-directional %s', minlen=3, maxlen=3,
                              units='NA')
    waypointfile = File(exists=False, argstr='-waypointfile %s',
                        desc='Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.')
    truncateloops = traits.Bool(argstr='-truncateloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint.")
    discardloops = traits.Bool(argstr='-discardloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint.")
    exclusionfile = File(exists=False, argstr='-exclusionfile %s',
                         desc='Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.')
    truncateinexclusion = traits.Bool(argstr='-truncateinexclusion', desc="Retain segments of a streamline before entry to an exclusion ROI.")
    endpointfile = File(exists=False, argstr='-endpointfile %s',
                        desc='Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.')
    resamplestepsize = traits.Float(argstr='-resamplestepsize %d', units='NA',
                                    desc="Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option.")
    noresample = traits.Bool(argstr='-noresample', desc="Disables resampling of input streamlines. Resampling is automatically disabled if the input model is voxels.")
    outputtracts = traits.Bool(argstr='-outputtracts', desc="Output streamlines in raw binary format.")
    outputroot = File(exists=False, argstr='-outputroot %s',
                      desc='root directory for output')
    gzip = traits.Bool(argstr='-gzip', desc="save the output image in gzip format")
    outputcp = traits.Bool(argstr='-outputcp', desc="output the connection probability map (Analyze image, float)")
    outputsc = traits.Bool(argstr='-outputsc', desc="output the connection probability map (raw streamlines, int)")
    outputacm = traits.Bool(argstr='-outputacm', desc="output all tracts in a single connection probability map (Analyze image)")
    outputcbs = traits.Bool(argstr='-outputcbs', desc="outputs connectivity-based segmentation maps; requires target outputfile")
class ProcStreamlinesOutputSpec(TraitedSpec):
    """Output specification for ProcStreamlines."""
    proc = File(exists=True, desc='Processed Streamlines')
class ProcStreamlines(StdOutCommandLine):
    """Post-process streamline output from ``track``.

    Can either output streamlines or connection probability maps.

    * http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Man.procstreamlines

    Examples
    --------
    >>> import nipype.interfaces.camino as cmon
    >>> proc = cmon.ProcStreamlines()
    >>> proc.inputs.in_file = 'tract_data.Bfloat'
    >>> proc.run() # doctest: +SKIP
    """
    _cmd = 'procstreamlines'
    input_spec = ProcStreamlinesInputSpec
    output_spec = ProcStreamlinesOutputSpec

    def _list_outputs(self):
        """Expose the processed-streamline file as the ``proc`` output."""
        results = self.output_spec().get()
        results['proc'] = os.path.abspath(self._gen_outfilename())
        return results

    def _gen_outfilename(self):
        """Name the output after the input with a ``_proc`` suffix."""
        _, stem, _ = split_filename(self.inputs.in_file)
        return stem + '_proc'
class TractShredderInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``tractshredder`` command."""
    # The tract file is supplied on stdin via shell redirection ('< file').
    in_file = File(exists=True, argstr='< %s', mandatory=True, position=-2, desc='tract file')
    offset = traits.Int(argstr='%d', units='NA',
                        desc='initial offset of offset tracts', position=1)
    bunchsize = traits.Int(argstr='%d', units='NA',
                           desc='reads and outputs a group of bunchsize tracts', position=2)
    space = traits.Int(argstr='%d', units='NA',
                       desc='skips space tracts', position=3)
class TractShredderOutputSpec(TraitedSpec):
    """Output specification for TractShredder."""
    shredded = File(exists=True, desc='Shredded tract file')
class TractShredder(StdOutCommandLine):
    """Extract bunches of streamlines.

    Works like ``shredder`` but on streamlines instead of scalar data:
    the input is raw streamlines in the format produced by ``track`` or
    ``procstreamlines``. The program first makes an initial offset of
    ``offset`` tracts, then repeatedly reads and outputs ``bunchsize``
    tracts and skips ``space`` tracts until the input is exhausted.

    Examples
    --------
    >>> import nipype.interfaces.camino as cmon
    >>> shred = cmon.TractShredder()
    >>> shred.inputs.in_file = 'tract_data.Bfloat'
    >>> shred.inputs.offset = 0
    >>> shred.inputs.bunchsize = 1
    >>> shred.inputs.space = 2
    >>> shred.run() # doctest: +SKIP
    """
    _cmd = 'tractshredder'
    input_spec = TractShredderInputSpec
    output_spec = TractShredderOutputSpec

    def _list_outputs(self):
        """Expose the shredded tract file as the ``shredded`` output."""
        results = self.output_spec().get()
        results['shredded'] = os.path.abspath(self._gen_outfilename())
        return results

    def _gen_outfilename(self):
        """Name the output after the input with a ``_shredded`` suffix."""
        _, stem, _ = split_filename(self.inputs.in_file)
        return stem + "_shredded"
class DT2NIfTIInputSpec(CommandLineInputSpec):
    """Input specification for the Camino ``dt2nii`` command."""
    in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1,
                   desc='tract file')
    # genfile=True: when unset, the root is generated from in_file's basename.
    output_root = File(argstr='-outputroot %s', position=2, genfile=True,
                       desc='filename root prepended onto the names of three output files.')
    header_file = File(exists=True, argstr='-header %s', mandatory=True, position=3,
                       desc=' A Nifti .nii or .hdr file containing the header information')
class DT2NIfTIOutputSpec(TraitedSpec):
    """Output specification for DT2NIfTI (three NIfTI volumes)."""
    dt = File(exists=True, desc='diffusion tensors in NIfTI format')
    exitcode = File(exists=True, desc='exit codes from Camino reconstruction in NIfTI format')
    lns0 = File(exists=True, desc='estimated lns0 from Camino reconstruction in NIfTI format')
class DT2NIfTI(CommandLine):
    """Convert Camino tensor data to NIfTI format.

    Reads Camino diffusion tensors and converts them to NIfTI format
    as three .nii files.
    """
    _cmd = 'dt2nii'
    input_spec = DT2NIfTIInputSpec
    output_spec = DT2NIfTIOutputSpec

    def _list_outputs(self):
        """Report the three files produced under the output root."""
        results = self.output_spec().get()
        root = self._gen_outputroot()
        for key, suffix in (("dt", "dt.nii"),
                            ("exitcode", "exitcode.nii"),
                            ("lns0", "lns0.nii")):
            results[key] = os.path.abspath(root + suffix)
        return results

    def _gen_outfilename(self):
        return self._gen_outputroot()

    def _gen_outputroot(self):
        """Use the explicit ``output_root`` input, or generate one."""
        root = self.inputs.output_root
        if not isdefined(root):
            root = self._gen_filename('output_root')
        return root

    def _gen_filename(self, name):
        """Default ``output_root`` is the input basename plus '_'."""
        if name == 'output_root':
            _, stem, _ = split_filename(self.inputs.in_file)
            return stem + "_"
class NIfTIDT2CaminoInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the ``niftidt2camino`` converter."""
    in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1,
                   desc='A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be '
                        'in lower-triangular order as specified by the NIFTI standard for the storage of '
                        'symmetric matrices. This file should be either a .nii or a .hdr file.')

    s0_file = File(argstr='-s0 %s', exists=True,
                   desc='File containing the unweighted signal for each voxel, may be a raw binary '
                        'file (specify type with -inputdatatype) or a supported image file.')

    lns0_file = File(argstr='-lns0 %s', exists=True,
                     desc='File containing the log of the unweighted signal for each voxel, may be a '
                          'raw binary file (specify type with -inputdatatype) or a supported image file.')

    bgmask = File(argstr='-bgmask %s', exists=True,
                  desc='Binary valued brain / background segmentation, may be a raw binary file '
                       '(specify type with -maskdatatype) or a supported image file.')

    scaleslope = traits.Float(argstr='-scaleslope %s',
                              desc='A value v in the diffusion tensor is scaled to v * s + i. This is '
                                   'applied after any scaling specified by the input image. Default is 1.0.')

    scaleinter = traits.Float(argstr='-scaleinter %s',
                              desc='A value v in the diffusion tensor is scaled to v * s + i. This is '
                                   'applied after any scaling specified by the input image. Default is 0.0.')

    uppertriangular = traits.Bool(argstr='-uppertriangular %s',
                                  desc = 'Specifies input in upper-triangular (VTK style) order.')
class NIfTIDT2CaminoOutputSpec(TraitedSpec):
    """Output specification for the ``niftidt2camino`` converter."""
    out_file = File(desc='diffusion tensors data in Camino format')
class NIfTIDT2Camino(CommandLine):
    """
    Converts NIFTI-1 diffusion tensors to Camino format.

    The NIFTI header is read but no spatial transformations are applied to
    the data; the NIFTI intensity scaling parameters are honoured.  Output
    is the tensors in Camino voxel ordering: [exit, ln(S0), dxx, dxy, dxz,
    dyy, dyz, dzz].

    The exit code is 0 everywhere unless a background mask is supplied, in
    which case it is 0 in brain voxels and -1 in background voxels.  The
    value of ln(S0) in the output is taken from a file if one is supplied,
    otherwise it is set to 0.

    NOTE FOR FSL USERS - FSL's dtifit can output NIFTI tensors, but they are
    not stored in the usual way (which is using NIFTI_INTENT_SYMMATRIX).
    FSL's tensors follow the ITK / VTK "upper-triangular" convention, so you
    will need to use the -uppertriangular option to convert these correctly.
    """
    _cmd = 'niftidt2camino'
    input_spec = NIfTIDT2CaminoInputSpec
    output_spec = NIfTIDT2CaminoOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["out_file"] = self._gen_filename('out_file')
        return outputs

    def _gen_filename(self, name):
        # Camino output keeps the input file's basename (no extension).
        if name == 'out_file':
            _, base, _ = split_filename(self.inputs.in_file)
            return base
class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec):
    """Input specification for ``analyzeheader``.

    Mirrors the command-line options of the Camino ``analyzeheader``
    program, which either reads an existing Analyze 7.5 header or writes a
    new one built from the arguments below.
    """
    in_file = File(exists=True, argstr='< %s', mandatory=True, position=1,
                   desc='Tensor-fitted data filename')

    scheme_file = File(exists=True, argstr='%s', position=2,
                       desc=('Camino scheme file (b values / vectors, '
                             'see camino.fsl2scheme)'))

    readheader = File(exists=True, argstr='-readheader %s', position=3,
                      desc=('Reads header information from file and prints to '
                            'stdout. If this option is not specified, then the '
                            'program writes a header based on the other '
                            'arguments.'))

    printimagedims = File(exists=True, argstr='-printimagedims %s', position=3,
                          desc=('Prints image data and voxel dimensions as '
                                'Camino arguments and exits.'))

    # How do we implement both file and enum (for the program) in one argument?
    # Is this option useful anyway?
    #-printprogargs <file> <prog>
    #Prints data dimension (and type, if relevant) arguments for a specific
    # Camino program, where prog is one of shredder, scanner2voxel,
    # vcthreshselect, pdview, track.
    printprogargs = File(exists=True, argstr='-printprogargs %s', position=3,
                         desc=('Prints data dimension (and type, if relevant) '
                               'arguments for a specific Camino program, where '
                               'prog is one of shredder, scanner2voxel, '
                               'vcthreshselect, pdview, track.'))

    printintelbyteorder = File(exists=True, argstr='-printintelbyteorder %s',
                               position=3,
                               desc=('Prints 1 if the header is little-endian, '
                                     '0 otherwise.'))

    printbigendian = File(exists=True, argstr='-printbigendian %s', position=3,
                          desc=('Prints 1 if the header is big-endian, 0 '
                                'otherwise.'))

    initfromheader = File(exists=True, argstr='-initfromheader %s', position=3,
                          desc=('Reads header information from file and '
                                'intializes a new header with the values read '
                                'from the file. You may replace any '
                                'combination of fields in the new header by '
                                'specifying subsequent options.'))

    data_dims = traits.List(traits.Int, desc = 'data dimensions in voxels',
                            argstr='-datadims %s', minlen=3, maxlen=3,
                            units='voxels')

    voxel_dims = traits.List(traits.Float, desc = 'voxel dimensions in mm',
                             argstr='-voxeldims %s', minlen=3, maxlen=3,
                             units='mm')

    centre = traits.List(traits.Int, argstr='-centre %s', minlen=3, maxlen=3,
                         units='mm',
                         desc=('Voxel specifying origin of Talairach '
                               'coordinate system for SPM, default [0 0 0].'))

    picoseed = traits.List(traits.Int, argstr='-picoseed %s', minlen=3,
                           maxlen=3,
                           desc=('Voxel specifying the seed (for PICo maps), '
                                 'default [0 0 0].'), units='mm')

    nimages = traits.Int(argstr='-nimages %d', units='NA',
                         desc="Number of images in the img file. Default 1.")

    datatype = traits.Enum('byte', 'char', '[u]short', '[u]int', 'float',
                           'complex', 'double', argstr='-datatype %s',
                           desc=('The char datatype is 8 bit (not the 16 bit '
                                 'char of Java), as specified by the Analyze '
                                 '7.5 standard. The byte, ushort and uint '
                                 'types are not part of the Analyze '
                                 'specification but are supported by SPM.'),
                           mandatory=True)

    offset = traits.Int(argstr='-offset %d', units='NA',
                        desc=('According to the Analyze 7.5 standard, this is '
                              'the byte offset in the .img file at which '
                              'voxels start. This value can be negative to '
                              'specify that the absolute value is applied for '
                              'every image in the file.'))

    greylevels = traits.List(traits.Int, argstr='-gl %s', minlen=2, maxlen=2,
                             desc=('Minimum and maximum greylevels. Stored as '
                                   'shorts in the header.'), units='NA')

    scaleslope = traits.Float(argstr='-scaleslope %d', units='NA',
                              desc=('Intensities in the image are scaled by '
                                    'this factor by SPM and MRICro. Default is '
                                    '1.0.'))

    scaleinter = traits.Float(argstr='-scaleinter %d', units='NA',
                              desc=('Constant to add to the image intensities. '
                                    'Used by SPM and MRIcro.'))

    description = traits.String(argstr='-description %s',
                                desc=('Short description - No spaces, max '
                                      'length 79 bytes. Will be null '
                                      'terminated automatically.'))

    intelbyteorder = traits.Bool(argstr='-intelbyteorder',
                                 desc=("Write header in intel byte order "
                                       "(little-endian)."))

    networkbyteorder = traits.Bool(argstr='-networkbyteorder',
                                   desc=("Write header in network byte order "
                                         "(big-endian). This is the default "
                                         "for new headers."))
class AnalyzeHeaderOutputSpec(TraitedSpec):
    """Output specification for ``analyzeheader``."""
    header = File(exists=True, desc='Analyze header')
class AnalyzeHeader(StdOutCommandLine):
    """
    Create or read an Analyze 7.5 header file.

    Provides support for the most common Analyze image header fields; some
    fields, such as patient_id, are not currently supported.  The program
    allows three nonstandard options: the field image_dimension.funused1 is
    the image scale (the intensity of each pixel in the associated .img
    file is (image value from file) * scale), and the origin of the
    Talairach coordinates (midline of the anterior commisure) is encoded in
    the field data_history.originator.  These changes are included for
    compatibility with SPM.  All headers written with this program are big
    endian by default.

    Example
    -------

    >>> import nipype.interfaces.camino as cmon
    >>> hdr = cmon.AnalyzeHeader()
    >>> hdr.inputs.in_file = 'tensor_fitted_data.Bdouble'
    >>> hdr.inputs.scheme_file = 'A.scheme'
    >>> hdr.inputs.data_dims = [256,256,256]
    >>> hdr.inputs.voxel_dims = [1,1,1]
    >>> hdr.run()                  # doctest: +SKIP
    """
    _cmd = 'analyzeheader'
    input_spec = AnalyzeHeaderInputSpec
    output_spec = AnalyzeHeaderOutputSpec

    def _gen_outfilename(self):
        # The generated header keeps the input basename with a .hdr suffix.
        _, base, _ = split_filename(self.inputs.in_file)
        return base + ".hdr"

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['header'] = os.path.abspath(self._gen_outfilename())
        return outputs
|
{
"content_hash": "13e3c9d8197f0aff41286c05dcce82b3",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 648,
"avg_line_length": 48.61578044596912,
"alnum_prop": 0.6298909783720849,
"repo_name": "mick-d/nipype_source",
"id": "2721be7fba5789ee820177bcc7a26d3fc7541084",
"size": "28343",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/camino/convert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9090"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3773780"
},
{
"name": "Shell",
"bytes": "2959"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import os
import shutil
import json
from django.core.management.base import BaseCommand
from django.conf import settings
from twiggy_goodies.threading import log
from twiggy_goodies.django import LogMixin
from allmychanges.downloaders import guess_downloaders
from allmychanges.models import (
Changelog)
from allmychanges.utils import (
update_fields)
from clint.textui import progress
# Maps the legacy single-downloader names stored on Changelog rows to the
# dotted downloader identifiers used after the migration (see migrate()).
# Note both 'http' and 'rechttp' collapse to the same 'http' downloader;
# the recursive flag is preserved separately by migrate_settings().
DOWNLOADERS_MAP = {
    u'feed': 'feed',
    u'git': 'vcs.git',
    u'github_releases': 'github_releases',
    u'git_commits': 'vcs.git_commits',
    u'hg': 'vcs.hg',
    u'http': 'http',
    u'rechttp': 'http',
    u'google_play': 'google_play',
    u'itunes': 'appstore',
}
def parse_package_name(name):
    """Turn a package reference string into Changelog lookup kwargs.

    Accepts a numeric primary key ("42"), a "namespace/name" pair, or a
    bare name, and returns the corresponding filter dict.
    """
    try:
        return {'pk': int(name)}
    except ValueError:
        pass

    if '/' not in name:
        return dict(name=name)

    namespace, rest = name.split('/', 1)
    return dict(namespace=namespace,
                name=rest)
def cleanup_source(source):
    """Strip a "scheme+" style prefix (e.g. "git+") from a source URL."""
    head, sep, tail = source.partition('+')
    return tail if sep else source
def migrate_settings(downloader, ch):
    """Build per-downloader settings for a changelog being migrated.

    Splits the http(s) entries out of the changelog's search/ignore lists
    into downloader-specific settings (when the target downloader is
    'http'), keeping the remaining entries as the generic lists.  A legacy
    'rechttp' downloader additionally sets the ``recursive`` flag.

    Returns a tuple of (downloader_settings, search_list, ignore_list, xslt).
    """
    downloader_settings = {}
    prev_downloader = ch.downloader

    import re
    # Raw string instead of the old ur'' prefix; pattern is ASCII-only.
    is_http = re.compile(r'^https?://.*')

    search_list = ch.search_list.split('\n')
    ignore_list = ch.ignore_list.split('\n')

    # BUG FIX: was `downloader is 'http'` — identity comparison against a
    # string literal only works by accident of CPython string interning;
    # use equality.
    if downloader == 'http':
        # List comprehensions (not bare filter()) so truthiness checks and
        # the stored settings are plain lists on any Python version.
        downloader_search_list = [line for line in search_list
                                  if is_http.match(line)]
        if downloader_search_list:
            downloader_settings['search_list'] = downloader_search_list

        downloader_ignore_list = [line for line in ignore_list
                                  if is_http.match(line)]
        if downloader_ignore_list:
            downloader_settings['ignore_list'] = downloader_ignore_list

        if prev_downloader == 'rechttp':
            downloader_settings['recursive'] = True

    # Whatever was not claimed by the http downloader stays in the
    # generic newline-separated lists.
    search_list = u'\n'.join(line for line in search_list
                             if not is_http.match(line))
    ignore_list = u'\n'.join(line for line in ignore_list
                             if not is_http.match(line))

    return (downloader_settings,
            search_list,
            ignore_list,
            ch.xslt)
def migrate(ch):
    """Migrate a single Changelog from the legacy single-downloader fields
    to the new multi-downloader scheme.

    Returns a short status string describing what happened ('migrated',
    'has no name', 'already was migrated', 'downloader not in guessed',
    'duplicate error').
    """
    with log.name_and_fields('migrator',
                             changelog=u'{0.namespace}/{0.name}'.format(ch)):
        # Drop the shared git cache so guess_downloaders starts clean.
        cache_dir = os.path.join(settings.TEMP_DIR, 'git-cache')
        if os.path.exists(cache_dir):
            log.info('Removing cache_dir')
            shutil.rmtree(cache_dir)

        if not ch.name:
            log.info('Has no name')
            return 'has no name'

        # A non-empty `downloaders` field means this row was already migrated.
        if not ch.downloaders:
            log.info('Migrating')
            downloaders = list(guess_downloaders(ch.source))
            downloader = DOWNLOADERS_MAP.get(ch.downloader)

            if downloader and downloaders:
                # The mapped legacy downloader must be one of the guessed ones.
                downloader_names = set(d['name'] for d in downloaders)
                if downloader not in downloader_names:
                    log.info('Downloader "{0}" is not in the list "{1}"'.format(
                        downloader,
                        ', '.join(downloader_names)))
                    log.info('Done 1')
                    return 'downloader not in guessed'

                # Changelogs whose versions came from 'vcs' really used the
                # commit-based git downloader.
                if downloader == 'git':
                    versions_sources = set(
                        v.source.lower()
                        for v in ch.versions.all())
                    if 'vcs' in versions_sources:
                        downloader = 'vcs.git_commits'

                source = cleanup_source(ch.source)
                # NOTE: xslt is unpacked but not written back here.
                (downloader_settings,
                 search_list,
                 ignore_list,
                 xslt) = migrate_settings(downloader, ch)
                try:
                    update_fields(ch,
                                  source=source,
                                  downloaders=downloaders,
                                  downloader=downloader,
                                  downloader_settings=downloader_settings,
                                  search_list=search_list,
                                  ignore_list=ignore_list)
                except Exception as e:
                    # Unique-constraint clashes are reported, not raised.
                    if 'Duplicate entry' in str(e):
                        log.trace().error('Duplicate error')
                        return 'duplicate error'
                    raise

                log.info('Downloader is "{0}"'.format(downloader))
            else:
                log.info(('No downloader or downloaders '
                          'and original downloader is "{0}"').format(
                              ch.downloader))
        else:
            log.info('Seems that changelog already migrated')
            return 'already was migrated'

        log.info('Done 2')
        return 'migrated'
class Command(LogMixin, BaseCommand):
help = u"""Download package sources into a temporary directory."""
def handle(self, *args, **options):
filename = 'migration.json'
with open(filename, 'r') as f:
data = json.load(f)
for item in progress.bar(data):
try:
ch = Changelog.objects.get(pk=item['pk'])
except Changelog.DoesNotExist:
print 'does not exists', item['pk']
continue
if ch.name != item['name'] or ch.name != item['name']:
print 'name or namespace are not equal to the database for {0}'.format(item['pk'])
continue
try:
if 'pk' in item:
item.pop('pk')
update_fields(ch, **item)
except Exception as e:
if 'Duplicate entry' in str(e):
log.trace().error('Duplicate error')
print 'duplicate error', ch.id
continue
|
{
"content_hash": "75d0e67b30763aaf78524c420561ea5a",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 98,
"avg_line_length": 30.687830687830687,
"alnum_prop": 0.5251724137931034,
"repo_name": "AllMyChanges/allmychanges.com",
"id": "fff920cd843bd58e2479817b6c7e8f87021a18c7",
"size": "5817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allmychanges/management/commands/load_migrated.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "147634"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Emacs Lisp",
"bytes": "905"
},
{
"name": "HTML",
"bytes": "96639"
},
{
"name": "JavaScript",
"bytes": "2645620"
},
{
"name": "Makefile",
"bytes": "7806"
},
{
"name": "Python",
"bytes": "752509"
},
{
"name": "Shell",
"bytes": "1426"
},
{
"name": "Stylus",
"bytes": "58519"
}
],
"symlink_target": ""
}
|
"""
Import Haringey
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode_point_only, PostcodeError
class Command(BaseAddressCsvImporter):
    """
    Imports the Polling Station data from Haringey Council
    """
    council_id = 'E09000014'
    addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-04-05.TSV'
    stations_name = 'PollingStations-2016-04-05.tsv'
    csv_delimiter = '\t'
    elections = [
        'gla.c.2016-05-05',
        'gla.a.2016-05-05',
        'mayor.london.2016-05-05',
        'ref.2016-06-23'
    ]

    def station_record_to_dict(self, record):
        # Join the six address components, then squeeze out the blank
        # lines left behind by empty components.
        address = "\n".join([
            record.pollingplaceaddress1,
            record.pollingplaceaddress2,
            record.pollingplaceaddress3,
            record.pollingplaceaddress4,
            record.pollingplaceaddress5,
            record.pollingplaceaddress6,
        ])
        while "\n\n" in address:
            address = address.replace("\n\n", "\n").strip()

        # No points are supplied in the source data, so attempt to attach
        # one by geocoding the postcode (field 7).
        postcode = record.pollingplaceaddress7
        location = None
        if len(postcode) > 5:
            try:
                gridref = geocode_point_only(postcode)
                location = Point(
                    gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
            except PostcodeError:
                location = None

        return {
            'internal_council_id': record.pollingdistrictreference,
            'postcode' : postcode,
            'address' : address,
            'location' : location
        }

    def address_record_to_dict(self, record):
        number = record.propertynumber.strip()
        street = record.streetname.strip()
        # A property number of '0' marks an address with no number.
        if number == '0':
            address = street
        else:
            address = '%s %s' % (number, street)

        return {
            'address' : address,
            'postcode' : record.postcode.strip(),
            'polling_station_id': record.pollingdistrictreference
        }
|
{
"content_hash": "9de0ad2ab7fe00ed1de792334ed4019a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 90,
"avg_line_length": 34.27272727272727,
"alnum_prop": 0.5870910698496905,
"repo_name": "andylolz/UK-Polling-Stations",
"id": "dcaac01137bbc76b84ffd3a8d344f081afa4d3c7",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_haringey.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39334"
},
{
"name": "Cucumber",
"bytes": "545"
},
{
"name": "HTML",
"bytes": "27197"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "119884"
},
{
"name": "QML",
"bytes": "22833"
}
],
"symlink_target": ""
}
|
from kolibri.logger.models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, MasteryLog, UserSessionLog
from rest_framework import serializers
class ContentSessionLogSerializer(serializers.ModelSerializer):
    """Serializes ContentSessionLog rows for the logger REST API."""

    class Meta:
        model = ContentSessionLog
        fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',
                  'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
class MasteryLogSerializer(serializers.ModelSerializer):
    """Serializes MasteryLog rows, augmented with attempt statistics."""

    # Computed fields; see the get_* methods below.
    pastattempts = serializers.SerializerMethodField()
    totalattempts = serializers.SerializerMethodField()

    class Meta:
        model = MasteryLog
        fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts',
                  'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')

    def get_pastattempts(self, obj):
        # Returns the 'correct' and 'hinted' values for the latest 10
        # attempts, newest first.
        return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]

    def get_totalattempts(self, obj):
        # Total number of attempts recorded against this summary log.
        return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
class AttemptLogSerializer(serializers.ModelSerializer):
    """Serializes individual question AttemptLog rows."""

    class Meta:
        model = AttemptLog
        fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
                  'end_timestamp', 'completion_timestamp', 'item', 'time_spent',
                  'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
class ContentSummaryLogSerializer(serializers.ModelSerializer):
    """Serializes ContentSummaryLog rows, embedding the latest mastery log."""

    currentmasterylog = serializers.SerializerMethodField()

    class Meta:
        model = ContentSummaryLog
        fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
                  'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')

    def get_currentmasterylog(self, obj):
        # Serialize the most recently ended mastery log, or None when the
        # summary log has no mastery logs yet.
        try:
            current_log = obj.masterylogs.latest('end_timestamp')
            return MasteryLogSerializer(current_log).data
        except MasteryLog.DoesNotExist:
            return None
class ContentRatingLogSerializer(serializers.ModelSerializer):
    """Serializes user feedback/rating logs for content."""

    class Meta:
        model = ContentRatingLog
        fields = ('pk', 'user', 'content_id', 'channel_id', 'quality', 'ease', 'learning', 'feedback')
class UserSessionLogSerializer(serializers.ModelSerializer):
    """Serializes UserSessionLog rows (per-session activity records)."""

    class Meta:
        model = UserSessionLog
        fields = ('pk', 'user', 'channels', 'start_timestamp', 'completion_timestamp', 'pages')
|
{
"content_hash": "bac591f4bbd843139fc9ccfd3b6ff07c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 141,
"avg_line_length": 40.515151515151516,
"alnum_prop": 0.6802543006731488,
"repo_name": "jayoshih/kolibri",
"id": "2df69e0d7ad063546f6c985678127090e15c15af",
"size": "2674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kolibri/logger/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26076"
},
{
"name": "HTML",
"bytes": "3846"
},
{
"name": "JavaScript",
"bytes": "323517"
},
{
"name": "Makefile",
"bytes": "2869"
},
{
"name": "Python",
"bytes": "530090"
},
{
"name": "Shell",
"bytes": "6705"
},
{
"name": "Vue",
"bytes": "210205"
}
],
"symlink_target": ""
}
|
from django.conf import settings
def ae_vars(request):
    """Template context processor exposing the configured Fusion Table id."""
    return {
        'FUSIONTABLE_ID': settings.FUSIONTABLE_ID,
    }
|
{
"content_hash": "f75d8d6783c9af3edd0492622a5a9608",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 17.5,
"alnum_prop": 0.6142857142857143,
"repo_name": "devsar/ae-people",
"id": "44d401a9ed277d3bec8c616e6da518831b750cae",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/core/context_processors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "96930"
},
{
"name": "JavaScript",
"bytes": "970"
},
{
"name": "Python",
"bytes": "464854"
},
{
"name": "Shell",
"bytes": "376"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe, importlib
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
    """Sentinel class used as the default for field defaults, so that
    ``None`` can itself be a legitimate default value."""
    def __str__(self):
        return 'No default provided.'
# Lenient ISO-8601-style matchers used by DateField / DateTimeField.convert.
# BUG FIX: the patterns were plain strings containing "\d" escapes, which is
# an invalid string escape (DeprecationWarning, and a SyntaxWarning/error on
# newer Pythons); raw strings make the regex intent explicit.
DATE_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
    """The base implementation of a field used by the resources."""
    dehydrated_type = 'string'
    help_text = ''

    def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None):
        """
        Sets up the field. This is generally called when the containing
        ``Resource`` is initialized.

        Optionally accepts an ``attribute``, which should be a string of
        either an instance attribute or callable off the object during the
        ``dehydrate`` or push data onto an object during the ``hydrate``.
        Defaults to ``None``, meaning data will be manually accessed.

        Optionally accepts a ``default``, which provides default data when the
        object being ``dehydrated``/``hydrated`` has no data on the field.
        Defaults to ``NOT_PROVIDED``.

        Optionally accepts a ``null``, which indicated whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.

        Optionally accepts a ``blank``, which indicated whether or not
        data may be omitted on the field. Defaults to ``False``.

        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.

        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.

        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.
        """
        # Track what the index thinks this field is called.
        self.instance_name = None
        self._resource = None
        self.attribute = attribute
        self._default = default
        self.null = null
        self.blank = blank
        self.readonly = readonly
        self.value = None
        self.unique = unique

        if help_text:
            # Only override the class-level help_text when one is supplied.
            self.help_text = help_text

    def contribute_to_class(self, cls, name):
        # Do the least we can here so that we don't hate ourselves in the
        # morning.
        self.instance_name = name
        self._resource = cls

    def has_default(self):
        """Returns a boolean of whether this field has a default value."""
        return self._default is not NOT_PROVIDED

    @property
    def default(self):
        """Returns the default value for the field."""
        if callable(self._default):
            return self._default()

        return self._default

    def dehydrate(self, bundle):
        """
        Takes data from the provided object and prepares it for the
        resource.
        """
        if self.attribute is not None:
            # Check for `__` in the field for looking through the relation.
            attrs = self.attribute.split('__')
            current_object = bundle.obj

            for attr in attrs:
                previous_object = current_object
                current_object = getattr(current_object, attr, None)

                if current_object is None:
                    if self.has_default():
                        # NOTE: uses the raw ``_default`` here (a callable
                        # default is invoked below via callable()).
                        current_object = self._default
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    elif self.null:
                        current_object = None
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    else:
                        raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))

            if callable(current_object):
                current_object = current_object()

            return self.convert(current_object)

        if self.has_default():
            return self.convert(self.default)
        else:
            return None

    def convert(self, value):
        """
        Handles conversion between the data found and the type of the field.

        Extending classes should override this method and provide correct
        data coercion.
        """
        return value

    def hydrate(self, bundle):
        """
        Takes data stored in the bundle for the field and returns it. Used for
        taking simple data and building a instance object.
        """
        if self.readonly:
            return None
        # ``has_key`` keeps this Python 2 compatible with the rest of the
        # module.
        if not bundle.data.has_key(self.instance_name):
            if getattr(self, 'is_related', False) and not getattr(self, 'is_m2m', False):
                # We've got an FK (or alike field) & a possible parent object.
                # Check for it.
                if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
                    return bundle.related_obj

            if self.blank:
                return None
            elif self.attribute and getattr(bundle.obj, self.attribute, None):
                return getattr(bundle.obj, self.attribute)
            elif self.instance_name and hasattr(bundle.obj, self.instance_name):
                return getattr(bundle.obj, self.instance_name)
            elif self.has_default():
                if callable(self._default):
                    return self._default()

                return self._default
            elif self.null:
                return None
            else:
                raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)

        return bundle.data[self.instance_name]
class CharField(ApiField):
    """
    A text field of arbitrary length.

    Covers both ``models.CharField`` and ``models.TextField``.
    """
    dehydrated_type = 'string'
    help_text = 'Unicode string data. Ex: "Hello World"'

    def convert(self, value):
        # Preserve None rather than coercing it to the string 'None'.
        return None if value is None else unicode(value)
class FileField(ApiField):
    """
    A file-related field.

    Covers both ``models.FileField`` and ``models.ImageField``.
    """
    dehydrated_type = 'string'
    help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'

    def convert(self, value):
        if value is None:
            return None

        try:
            # ``File``-like objects expose a ``url`` attribute; plain
            # strings (an override or a default) pass through unchanged.
            result = getattr(value, 'url', value)
        except ValueError:
            result = None

        return result
class IntegerField(ApiField):
    """
    An integer field.

    Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
    ``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
    """
    dehydrated_type = 'integer'
    help_text = 'Integer data. Ex: 2673'

    def convert(self, value):
        return None if value is None else int(value)
class FloatField(ApiField):
    """
    A floating point field.
    """
    dehydrated_type = 'float'
    help_text = 'Floating point numeric data. Ex: 26.73'

    def convert(self, value):
        return None if value is None else float(value)
class DecimalField(ApiField):
    """
    A decimal field.
    """
    dehydrated_type = 'decimal'
    help_text = 'Fixed precision numeric data. Ex: 26.73'

    def convert(self, value):
        return None if value is None else Decimal(value)
class BooleanField(ApiField):
    """
    A boolean field.

    Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
    """
    dehydrated_type = 'boolean'
    help_text = 'Boolean data. Ex: True'

    def convert(self, value):
        return None if value is None else bool(value)
class ListField(ApiField):
    """
    A list field.
    """
    dehydrated_type = 'list'
    help_text = "A list of data. Ex: ['abc', 26.73, 8]"

    def convert(self, value):
        return None if value is None else list(value)
class DictField(ApiField):
    """
    A dictionary field.
    """
    dehydrated_type = 'dict'
    help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"

    def convert(self, value):
        return None if value is None else dict(value)
class DateField(ApiField):
    """
    A date field.
    """
    dehydrated_type = 'date'
    help_text = 'A date as a string. Ex: "2010-11-10"'

    def convert(self, value):
        if value is None:
            return None

        if isinstance(value, basestring):
            match = DATE_REGEX.search(value)

            if not match:
                raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))

            data = match.groupdict()
            return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))

        # Already a date-like object; pass it through untouched.
        return value

    def hydrate(self, bundle):
        value = super(DateField, self).hydrate(bundle)

        if value and not hasattr(value, 'year'):
            try:
                # Try to rip a date/datetime out of it.
                value = make_aware(parse(value))

                # A full datetime was parsed; keep only the date part.
                if hasattr(value, 'hour'):
                    value = value.date()
            except ValueError:
                pass

        return value
class DateTimeField(ApiField):
    """
    A datetime field.
    """
    dehydrated_type = 'datetime'
    help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'

    def convert(self, value):
        if value is None:
            return None

        if isinstance(value, basestring):
            match = DATETIME_REGEX.search(value)

            if not match:
                raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))

            data = match.groupdict()
            return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))

        # Already a datetime-like object; pass it through untouched.
        return value

    def hydrate(self, bundle):
        value = super(DateTimeField, self).hydrate(bundle)

        if value and not hasattr(value, 'year'):
            try:
                # Try to rip a date/datetime out of it.
                value = make_aware(parse(value))
            except ValueError:
                pass

        return value
class RelatedField(ApiField):
    """
    Provides access to data that is related within the database.
    The ``RelatedField`` base class is not intended for direct use but provides
    functionality that ``ToOneField`` and ``ToManyField`` build upon.
    The contents of this field actually point to another ``Resource``,
    rather than the related object. This allows the field to represent its data
    in different ways.
    The abstractions based around this are "leaky" in that, unlike the other
    fields provided by ``tastypie``, these fields don't handle arbitrary objects
    very well. The subclasses use Django's ORM layer to make things go, though
    there is no ORM-specific code at this level.
    """
    dehydrated_type = 'related'
    is_related = True
    self_referential = False
    help_text = 'A related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None):
        """
        Builds the field and prepares it to access to related data.
        The ``to`` argument should point to a ``Resource`` class, NOT
        to a ``Model``. Required.
        The ``attribute`` argument should specify what field/callable points to
        the related data on the instance object. Required.
        Optionally accepts a ``related_name`` argument. Currently unused, as
        unlike Django's ORM layer, reverse relations between ``Resource``
        classes are not automatically created. Defaults to ``None``.
        Optionally accepts a ``null``, which indicated whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.
        Optionally accepts a ``blank``, which indicated whether or not
        data may be omitted on the field. Defaults to ``False``.
        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.
        Optionally accepts a ``full``, which indicates how the related
        ``Resource`` will appear post-``dehydrate``. If ``False``, the
        related ``Resource`` will appear as a URL to the endpoint of that
        resource. If ``True``, the result of the sub-resource's
        ``dehydrate`` will be included in full.
        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.
        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.
        """
        self.instance_name = None
        self._resource = None
        self.to = to
        self.attribute = attribute
        self.related_name = related_name
        self._default = default
        self.null = null
        self.blank = blank
        self.readonly = readonly
        self.full = full
        self.api_name = None
        self.resource_name = None
        self.unique = unique
        self._to_class = None
        # 'self' means the field points back at its own resource class;
        # resolved immediately here, or in contribute_to_class below.
        if self.to == 'self':
            self.self_referential = True
            self._to_class = self.__class__
        if help_text:
            self.help_text = help_text
    def contribute_to_class(self, cls, name):
        super(RelatedField, self).contribute_to_class(cls, name)
        # Check if we're self-referential and hook it up.
        # We can't do this quite like Django because there's no ``AppCache``
        # here (which I think we should avoid as long as possible).
        if self.self_referential or self.to == 'self':
            self._to_class = cls
    def get_related_resource(self, related_instance):
        """
        Instaniates the related resource.
        """
        related_resource = self.to_class()
        # Fix the ``api_name`` if it's not present.
        if related_resource._meta.api_name is None:
            if self._resource and not self._resource._meta.api_name is None:
                related_resource._meta.api_name = self._resource._meta.api_name
        # Try to be efficient about DB queries.
        related_resource.instance = related_instance
        return related_resource
    @property
    def to_class(self):
        # We need to be lazy here, because when the metaclass constructs the
        # Resources, other classes may not exist yet.
        # That said, memoize this so we never have to relookup/reimport.
        if self._to_class:
            return self._to_class
        if not isinstance(self.to, basestring):
            # Already a class object; memoize and return it directly.
            self._to_class = self.to
            return self._to_class
        # It's a string. Let's figure it out.
        if '.' in self.to:
            # Try to import.
            module_bits = self.to.split('.')
            module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
            module = importlib.import_module(module_path)
        else:
            # We've got a bare class name here, which won't work (No AppCache
            # to rely on). Try to throw a useful error.
            raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
        self._to_class = getattr(module, class_name, None)
        if self._to_class is None:
            raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
        return self._to_class
    def dehydrate_related(self, bundle, related_resource):
        """
        Based on the ``full_resource``, returns either the endpoint or the data
        from ``full_dehydrate`` for the related resource.
        """
        if not self.full:
            # Be a good netizen.
            return related_resource.get_resource_uri(bundle)
        else:
            # ZOMG extra data and big payloads.
            bundle = related_resource.build_bundle(obj=related_resource.instance, request=bundle.request)
            return related_resource.full_dehydrate(bundle)
    def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
        """
        Given a URI is provided, the related resource is attempted to be
        loaded based on the identifiers in the URI.
        """
        try:
            obj = fk_resource.get_via_uri(uri, request=request)
            bundle = fk_resource.build_bundle(obj=obj, request=request)
            return fk_resource.full_dehydrate(bundle)
        except ObjectDoesNotExist:
            raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
    def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
        """
        Given a dictionary-like structure is provided, a fresh related
        resource is created using that data.
        """
        # Try to hydrate the data provided.
        data = dict_strip_unicode_keys(data)
        fk_bundle = fk_resource.build_bundle(data=data, request=request)
        if related_obj:
            fk_bundle.related_obj = related_obj
            fk_bundle.related_name = related_name
        # We need to check to see if updates are allowed on the FK
        # resource. If not, we'll just return a populated bundle instead
        # of mistakenly updating something that should be read-only.
        if not fk_resource.can_update():
            return fk_resource.full_hydrate(fk_bundle)
        try:
            return fk_resource.obj_update(fk_bundle, skip_errors=True, **data)
        except NotFound:
            try:
                # Attempt lookup by primary key
                # NOTE: Python 2 idiom (iteritems) -- consistent with the
                # rest of this module.
                lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique)
                if not lookup_kwargs:
                    raise NotFound()
                return fk_resource.obj_update(fk_bundle, skip_errors=True, **lookup_kwargs)
            except NotFound:
                # No match at all: validate and return the hydrated bundle
                # so the caller can create the object.
                fk_bundle = fk_resource.full_hydrate(fk_bundle)
                fk_resource.is_valid(fk_bundle, request)
                return fk_bundle
        except MultipleObjectsReturned:
            return fk_resource.full_hydrate(fk_bundle)
    def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
        """
        Given an object with a ``pk`` attribute, the related resource
        is attempted to be loaded via that PK.
        """
        bundle = fk_resource.build_bundle(obj=obj, request=request)
        return fk_resource.full_dehydrate(bundle)
    def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
        """
        Returns a bundle of data built by the related resource, usually via
        ``hydrate`` with the data provided.
        Accepts either a URI, a data dictionary (or dictionary-like structure)
        or an object with a ``pk``.
        """
        self.fk_resource = self.to_class()
        kwargs = {
            'request': request,
            'related_obj': related_obj,
            'related_name': related_name,
        }
        if isinstance(value, basestring):
            # We got a URI. Load the object and assign it.
            return self.resource_from_uri(self.fk_resource, value, **kwargs)
        elif hasattr(value, 'items'):
            # We've got a data dictionary.
            # Since this leads to creation, this is the only one of these
            # methods that might care about "parent" data.
            return self.resource_from_data(self.fk_resource, value, **kwargs)
        elif hasattr(value, 'pk'):
            # We've got an object with a primary key.
            return self.resource_from_pk(self.fk_resource, value, **kwargs)
        else:
            raise ApiFieldError("The '%s' field has was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
class ToOneField(RelatedField):
    """
    Provides access to related data via foreign key.
    This subclass requires Django's ORM layer to work properly.
    """
    help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None):
        super(ToOneField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text
        )
        # Cached resource instance for the most recent (de)hydration.
        self.fk_resource = None
    def dehydrate(self, bundle):
        """Resolve ``attribute`` (supports ``__``-separated traversal) and
        return either the related URI or fully dehydrated data.

        Raises ``ApiFieldError`` if any hop is empty and ``null`` is False.
        """
        attrs = self.attribute.split('__')
        foreign_obj = bundle.obj
        for attr in attrs:
            previous_obj = foreign_obj
            try:
                foreign_obj = getattr(foreign_obj, attr, None)
            except ObjectDoesNotExist:
                foreign_obj = None
            if not foreign_obj:
                if not self.null:
                    raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
                return None
        self.fk_resource = self.get_related_resource(foreign_obj)
        fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
        return self.dehydrate_related(fk_bundle, self.fk_resource)
    def hydrate(self, bundle):
        """Build the related resource bundle from inbound data (URI, dict
        or object with a ``pk``); ``None`` passes through unchanged.
        """
        value = super(ToOneField, self).hydrate(bundle)
        if value is None:
            return value
        return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
    """
    A convenience subclass for those who prefer to mirror ``django.db.models``.
    Behaviorally identical to ``ToOneField``; only the name differs.
    """
    pass
class OneToOneField(ToOneField):
    """
    A convenience subclass for those who prefer to mirror ``django.db.models``.
    Behaviorally identical to ``ToOneField``; only the name differs.
    """
    pass
class ToManyField(RelatedField):
    """
    Provides access to related data via a join table.
    This subclass requires Django's ORM layer to work properly.
    Note that the ``hydrate`` portions of this field are quite different than
    any other field. ``hydrate_m2m`` actually handles the data and relations.
    This is due to the way Django implements M2M relationships.
    """
    is_m2m = True
    help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None):
        super(ToManyField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text
        )
        # Bundles gathered during hydration; populated elsewhere.
        self.m2m_bundles = []
    def dehydrate(self, bundle):
        """Return a list of dehydrated related resources (URIs or full
        data, per ``full``).

        ``attribute`` may be a ``__``-separated path or a callable taking
        the bundle.  Raises ``ApiFieldError`` when the object has no pk or
        the relation is empty and ``null`` is False.
        """
        if not bundle.obj or not bundle.obj.pk:
            if not self.null:
                raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
            return []
        the_m2ms = None
        previous_obj = bundle.obj
        attr = self.attribute
        if isinstance(self.attribute, basestring):
            attrs = self.attribute.split('__')
            the_m2ms = bundle.obj
            for attr in attrs:
                previous_obj = the_m2ms
                try:
                    the_m2ms = getattr(the_m2ms, attr, None)
                except ObjectDoesNotExist:
                    the_m2ms = None
                if not the_m2ms:
                    break
        elif callable(self.attribute):
            the_m2ms = self.attribute(bundle)
        if not the_m2ms:
            if not self.null:
                raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
            return []
        self.m2m_resources = []
        m2m_dehydrated = []
        # TODO: Also model-specific and leaky. Relies on there being a
        # ``Manager`` there.
        for m2m in the_m2ms.all():
            m2m_resource = self.get_related_resource(m2m)
            m2m_bundle = Bundle(obj=m2m, request=bundle.request)
            self.m2m_resources.append(m2m_resource)
            m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource))
        return m2m_dehydrated
    def hydrate(self, bundle):
        # Intentionally a no-op: M2M data is handled by hydrate_m2m below.
        pass
    def hydrate_m2m(self, bundle):
        """Build the list of related resource bundles from inbound data.

        Returns ``None`` for readonly fields, ``[]`` when data is missing
        and ``blank``/``null`` allow it; otherwise raises ``ApiFieldError``.
        """
        if self.readonly:
            return None
        if bundle.data.get(self.instance_name) is None:
            if self.blank:
                return []
            elif self.null:
                return []
            else:
                raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
        m2m_hydrated = []
        for value in bundle.data.get(self.instance_name):
            if value is None:
                # Skip null entries rather than failing the whole list.
                continue
            kwargs = {
                'request': bundle.request,
            }
            if self.related_name:
                kwargs['related_obj'] = bundle.obj
                kwargs['related_name'] = self.related_name
            m2m_hydrated.append(self.build_related_resource(value, **kwargs))
        return m2m_hydrated
class ManyToManyField(ToManyField):
    """
    A convenience subclass for those who prefer to mirror ``django.db.models``.
    Behaviorally identical to ``ToManyField``; only the name differs.
    """
    pass
class OneToManyField(ToManyField):
    """
    A convenience subclass for those who prefer to mirror ``django.db.models``.
    Behaviorally identical to ``ToManyField``; only the name differs.
    """
    pass
class TimeField(ApiField):
    """A time field, serialized as an ``HH:MM:SS`` string."""
    dehydrated_type = 'time'
    help_text = 'A time as string. Ex: "20:05:23"'
    def dehydrate(self, obj):
        """Dehydrate via the parent, then normalize strings to ``time``."""
        return self.convert(super(TimeField, self).dehydrate(obj))
    def convert(self, value):
        """Parse string values into ``datetime.time``; pass others through."""
        if isinstance(value, basestring):
            return self.to_time(value)
        return value
    def to_time(self, s):
        """Parse ``s`` into a ``datetime.time``.

        Raises ``ApiFieldError`` (wrapping the parser's ValueError message)
        on invalid input.  Note: Python 2 ``except`` syntax, consistent
        with the rest of this module.
        """
        try:
            dt = parse(s)
        except ValueError, e:
            raise ApiFieldError(str(e))
        else:
            # Drop the date part; keep only hour/minute/second.
            return datetime.time(dt.hour, dt.minute, dt.second)
    def hydrate(self, bundle):
        """Coerce inbound data into a ``datetime.time``."""
        value = super(TimeField, self).hydrate(bundle)
        if value and not isinstance(value, datetime.time):
            value = self.to_time(value)
        return value
|
{
"content_hash": "f5b24351e0cc05ddd7980a35f6ef82d1",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 181,
"avg_line_length": 34.64099378881988,
"alnum_prop": 0.5980778885462239,
"repo_name": "rbraley/django-tastypie",
"id": "0ccf3c25aeed7c827367610abec79c7c4df23f89",
"size": "27886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tastypie/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "528375"
},
{
"name": "Shell",
"bytes": "842"
}
],
"symlink_target": ""
}
|
import hashlib
import json
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
# Each segment, except for the final one, must be at least 1 megabyte
MIN_SEGMENT_SIZE = 1024 * 1024
class ObjectSloTest(base.BaseObjectTest):
    """Tests for Swift Static Large Objects (SLO) via multipart manifests."""
    def setUp(self):
        # Fresh container per test; created objects are tracked in
        # self.objects so tearDown can delete them.
        super(ObjectSloTest, self).setUp()
        self.container_name = data_utils.rand_name(name='TestContainer')
        self.container_client.create_container(self.container_name)
        self.objects = []
    def tearDown(self):
        # Best-effort cleanup: objects may already have been removed by the
        # test itself (e.g. test_delete_large_object), hence NotFound is
        # ignored.
        for obj in self.objects:
            try:
                self.object_client.delete_object(
                    self.container_name,
                    obj)
            except exceptions.NotFound:
                pass
        self.container_client.delete_container(self.container_name)
        super(ObjectSloTest, self).tearDown()
    def _create_object(self, container_name, object_name, data, params=None):
        """Create an object, record it for cleanup and return the response."""
        resp, _ = self.object_client.create_object(container_name,
                                                   object_name,
                                                   data,
                                                   params)
        self.objects.append(object_name)
        return resp
    def _create_manifest(self):
        # Create a manifest file for SLO uploading
        object_name = data_utils.rand_name(name='TestObject')
        object_name_base_1 = object_name + '_01'
        object_name_base_2 = object_name + '_02'
        data_size = MIN_SEGMENT_SIZE
        self.content = data_utils.arbitrary_string(data_size)
        self._create_object(self.container_name,
                            object_name_base_1,
                            self.content)
        self._create_object(self.container_name,
                            object_name_base_2,
                            self.content)
        path_object_1 = '/%s/%s' % (self.container_name,
                                    object_name_base_1)
        path_object_2 = '/%s/%s' % (self.container_name,
                                    object_name_base_2)
        # Each manifest entry carries path, md5 etag and size, as required
        # by the SLO 'multipart-manifest=put' API.
        data_manifest = [{'path': path_object_1,
                          'etag': hashlib.md5(self.content).hexdigest(),
                          'size_bytes': data_size},
                         {'path': path_object_2,
                          'etag': hashlib.md5(self.content).hexdigest(),
                          'size_bytes': data_size}]
        return json.dumps(data_manifest)
    def _create_large_object(self):
        # Create a large object for preparation of testing various SLO
        # features
        manifest = self._create_manifest()
        params = {'multipart-manifest': 'put'}
        object_name = data_utils.rand_name(name='TestObject')
        self._create_object(self.container_name,
                            object_name,
                            manifest,
                            params)
        return object_name
    def _assertHeadersSLO(self, resp, method):
        # When sending GET or HEAD requests to SLO the response contains
        # 'X-Static-Large-Object' header
        if method in ('GET', 'HEAD'):
            self.assertIn('x-static-large-object', resp)
            self.assertEqual(resp['x-static-large-object'], 'True')
        # Etag value of a large object is enclosed in double-quotations.
        # After etag quotes are checked they are removed and the response is
        # checked if all common headers are present and well formatted
        self.assertTrue(resp['etag'].startswith('\"'))
        self.assertTrue(resp['etag'].endswith('\"'))
        resp['etag'] = resp['etag'].strip('"')
        self.assertHeaders(resp, 'Object', method)
    @test.attr(type='gate')
    def test_upload_manifest(self):
        # create static large object from multipart manifest
        manifest = self._create_manifest()
        params = {'multipart-manifest': 'put'}
        object_name = data_utils.rand_name(name='TestObject')
        resp = self._create_object(self.container_name,
                                   object_name,
                                   manifest,
                                   params)
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        self._assertHeadersSLO(resp, 'PUT')
    @test.attr(type='gate')
    def test_list_large_object_metadata(self):
        # list static large object metadata using multipart manifest
        object_name = self._create_large_object()
        resp, body = self.object_client.list_object_metadata(
            self.container_name,
            object_name)
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        self._assertHeadersSLO(resp, 'HEAD')
    @test.attr(type='gate')
    def test_retrieve_large_object(self):
        # list static large object using multipart manifest
        object_name = self._create_large_object()
        resp, body = self.object_client.get_object(
            self.container_name,
            object_name)
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        self._assertHeadersSLO(resp, 'GET')
        # The SLO body must be the concatenation of both segments.
        sum_data = self.content + self.content
        self.assertEqual(body, sum_data)
    @test.attr(type='gate')
    def test_delete_large_object(self):
        # delete static large object using multipart manifest
        object_name = self._create_large_object()
        params_del = {'multipart-manifest': 'delete'}
        resp, body = self.object_client.delete_object(
            self.container_name,
            object_name,
            params=params_del)
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        # When deleting SLO using multipart manifest, the response contains
        # not 'content-length' but 'transfer-encoding' header. This is the
        # special case, therefore the existence of response headers is checked
        # outside of custom matcher.
        self.assertIn('transfer-encoding', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)
        # Check only the format of common headers with custom matcher
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
        resp, body = self.container_client.list_container_contents(
            self.container_name)
        # Deleting with the manifest removes the segments too.
        self.assertEqual(int(resp['x-container-object-count']), 0)
|
{
"content_hash": "41392cd8484f358464aa74bf8114f32d",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 39.096385542168676,
"alnum_prop": 0.5793528505392912,
"repo_name": "nikolay-fedotov/tempest",
"id": "159ad5cd567f3dca7cfeed28a4dfaf2c94c80663",
"size": "7098",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/test_object_slo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Model Definitions"""
# flake8: noqa
# first-party
from tcex.api.tc.ti_transform.model.transform_model import (
AttributeTransformModel,
DatetimeTransformModel,
GroupTransformModel,
IndicatorTransformModel,
MetadataTransformModel,
SecurityLabelTransformModel,
TagTransformModel,
TiTransformModel,
TransformModel,
)
|
{
"content_hash": "dc5adc2042b8988bedc6b0d50de62733",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 25.214285714285715,
"alnum_prop": 0.7592067988668555,
"repo_name": "ThreatConnect-Inc/tcex",
"id": "48a201b3cb543ccb79708d3424477476f16cc71e",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tcex/api/tc/ti_transform/model/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2735042"
}
],
"symlink_target": ""
}
|
from .widget import ChartWidget
from .item import CandleItem, VolumeItem
|
{
"content_hash": "35d5f095d4daabc540af8bf4379d813a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 36.5,
"alnum_prop": 0.8356164383561644,
"repo_name": "bigdig/vnpy",
"id": "4d090ff441196dcf03af89402e9ab51a2cf83c74",
"size": "73",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vnpy/chart/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "390"
},
{
"name": "C",
"bytes": "1652953"
},
{
"name": "C++",
"bytes": "13737810"
},
{
"name": "Objective-C",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "2979947"
},
{
"name": "Shell",
"bytes": "6050"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class Error_YValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for the ``error_y`` compound property of
    ``histogram`` traces; delegates validation to the ``ErrorY`` data class.
    """
    def __init__(self, plotly_name="error_y", parent_name="histogram", **kwargs):
        # data_docs below is runtime help text surfaced to plotly users;
        # callers may override it (and data_class_str) via kwargs.
        super(Error_YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "ErrorY"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            array
                Sets the data corresponding the length of each
                error bar. Values are plotted relative to the
                underlying data.
            arrayminus
                Sets the data corresponding the length of each
                error bar in the bottom (left) direction for
                vertical (horizontal) bars Values are plotted
                relative to the underlying data.
            arrayminussrc
                Sets the source reference on Chart Studio Cloud
                for  arrayminus .
            arraysrc
                Sets the source reference on Chart Studio Cloud
                for  array .
            color
                Sets the stoke color of the error bars.
            symmetric
                Determines whether or not the error bars have
                the same length in both direction (top/bottom
                for vertical bars, left/right for horizontal
                bars.
            thickness
                Sets the thickness (in px) of the error bars.
            traceref

            tracerefminus

            type
                Determines the rule used to generate the error
                bars. If *constant`, the bar lengths are of a
                constant value. Set this constant in `value`.
                If "percent", the bar lengths correspond to a
                percentage of underlying data. Set this
                percentage in `value`. If "sqrt", the bar
                lengths correspond to the sqaure of the
                underlying data. If "data", the bar lengths are
                set with data set `array`.
            value
                Sets the value of either the percentage (if
                `type` is set to "percent") or the constant (if
                `type` is set to "constant") corresponding to
                the lengths of the error bars.
            valueminus
                Sets the value of either the percentage (if
                `type` is set to "percent") or the constant (if
                `type` is set to "constant") corresponding to
                the lengths of the error bars in the bottom
                (left) direction for vertical (horizontal) bars
            visible
                Determines whether or not this set of error
                bars is visible.
            width
                Sets the width (in px) of the cross-bar at both
                ends of the error bars.
""",
            ),
            **kwargs
        )
|
{
"content_hash": "1b4a332b9936041bc37cdbc460285f19",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 81,
"avg_line_length": 42.04225352112676,
"alnum_prop": 0.5360134003350083,
"repo_name": "plotly/python-api",
"id": "cb02b5a25e17827db6d11cb04b5a45a35b32d7b3",
"size": "2985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram/_error_y.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class HotelUpdateRequest(RestApi):
    """Request object for the TOP ``taobao.hotel.update`` API call.

    Each writable hotel attribute starts out as ``None`` and is filled in
    by the caller before the request is executed; ``pic`` is uploaded as
    multipart content.
    """

    # All hotel fields this request can carry.
    _FIELDS = (
        'address', 'city', 'country', 'decorate_time', 'desc', 'district',
        'domestic', 'hid', 'level', 'name', 'opening_time', 'orientation',
        'pic', 'province', 'rooms', 'service', 'storeys', 'tel',
    )

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Default every request field to None until the caller sets it.
        for field in self._FIELDS:
            setattr(self, field, None)

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.hotel.update'

    def getMultipartParas(self):
        """Return the parameter names sent as multipart uploads."""
        return ['pic']
|
{
"content_hash": "7dddc471a2675d42f210af83f8271221",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 55,
"avg_line_length": 23.741935483870968,
"alnum_prop": 0.6589673913043478,
"repo_name": "CooperLuan/devops.notes",
"id": "3e69c3f4b6953645691c402ec94d3c3193a51457",
"size": "736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/HotelUpdateRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
}
|
"""sqlalchemy_postgresql_json
Adds JSON support to SQLAlchemy, with magic helpers to keep things mutable
And now adds LTREE support as well
"""
from setuptools import setup, find_packages
# Version kept as a tuple for numeric comparison; the dotted string form is
# what setuptools and the download URL need.
VERSION = (0, 5, 0)
VERSION_STRING = ".".join(map(str, VERSION))
# The module docstring above doubles as the long_description.
setup(name='sqlalchemy-postgresql-json',
      author="Franklyn Tackitt",
      author_email="franklyn@tackitt.net",
      url="https://github.com/DisruptiveLabs/sqlalchemy_postgresql_json",
      download_url="https://github.com/DisruptiveLabs/sqlalchemy_postgresql_json/tarball/%s" % VERSION_STRING,
      version=VERSION_STRING,
      description="Postgresql JSON Extension for sqlalchemy",
      long_description=__doc__,
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'psycopg2',
          'sqlalchemy>=1.1'
      ])
|
{
"content_hash": "00229c699ed56a0c07831c7b077fd7e0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 110,
"avg_line_length": 32.84615384615385,
"alnum_prop": 0.6873536299765808,
"repo_name": "DisruptiveLabs/sqlalchemy_postgresql_json",
"id": "fb5c99b70edccbc09dcd5f154ecb97084d6da552",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11858"
}
],
"symlink_target": ""
}
|
""" This module provides the fixtures for the PYTEST runs.
"""
import numpy as np
import tempfile
import pytest
import os
@pytest.fixture(scope='function')
def set_seed():
    """ Each test is executed with the same random seed.
    """
    # Fixed seed makes tests that draw NumPy random numbers reproducible.
    np.random.seed(1223)
@pytest.fixture(scope='function')
def fresh_directory():
    """ Each test is executed in a fresh directory.
    """
    # NOTE(review): the temp directory is never removed and the original
    # cwd is not restored after the test -- confirm this is intentional.
    os.chdir(tempfile.mkdtemp())
|
{
"content_hash": "10cb5610890e8014b5a9e811474d77d8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.6834532374100719,
"repo_name": "restudToolbox/package",
"id": "33670f7fe2b49c4ea1d5d1dbb03b111adf32e998",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "respy/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "571229"
},
{
"name": "HCL",
"bytes": "342"
},
{
"name": "Python",
"bytes": "417314"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
}
|
#|##############################################################################
#|Copyright (c) 2009, The Green-Span Project. All rights reserved. This code is
#|Open Source Free Software - redistribution and use in source and binary forms,
#|with or without modification, are permitted under the Two Clause BSD License.
#|##############################################################################
#|File Created: 2009-04-02
#|Author(s): Sean Hastings,
#|##############################################################################
VERBOSE = True
from globals import ALLVERBOSE
from twisted.mail import imap4
from zope.interface import implements
import os
from imapproxymailbox import ImapProxyMailbox
class ImapProxyAccount(object):
implements(imap4.IAccount)
    def __init__(self, addresspwd, proxy):
        """Create a per-login account object.

        ``addresspwd`` identifies the login (presumably an address/password
        pair -- confirm with caller); ``proxy`` is the downstream protocol
        instance this account serves.
        """
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.__init__(%s)" % addresspwd
        #initialize local object variables
        self.connected = False #becomes True when server connection is established
        self.server = None #upstream reference to server - instanced from cache or by deferred method (see firstConnect)
        self.proxy = proxy #downstream reference to proxy
        #Client side opinion of IMAP protocol state - server side opinion at self.server.selected
        self.selected = None #starts with no mailbox selected
    def firstConnect(self, protocol):
        """Calback function when server connection first succeeds"""
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.firstConnect - WOOHOO! PROXY CONNECTED THROUGH!"
        # Record the established upstream protocol, then prime the
        # subscription list (returns getSubscribed's Deferred for chaining).
        self.server.connected = True
        self.server.protocol = protocol
        return self.getSubscribed()
def connectError(self):
"""Callback function if server connection fails"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.connectError"
print >> sys.stderr, "Error:", error.getErrorMessage()
    def getSubscribed(self):
        """gets subscribed mailboxes from server - should do this at first connect

        Issues an upstream LSUB "" "*" and returns the Deferred, with
        __getSubscribed_cb chained to record the results.
        """
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.getSubscribed()"
        d = self.server.protocol.lsub("","*")
        d.addCallback(self.__getSubscribed_cb)
        return d
    def __getSubscribed_cb(self,results):
        """LSUB returned successfully - add all to subscribed list"""
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyAccount.__getSubscribed_cb"
            print results
        subscribed_list = []
        for boxinfo in results:
            # NOTE(review): assumes boxinfo[2] is the mailbox name in the
            # LSUB reply tuple -- confirm against twisted's imap4 client.
            name = boxinfo[2]
            box = self._getBox(boxinfo) #creates box and caches it locally
            subscribed_list.append(name)
        self.server.subscribed = subscribed_list
def addMailbox(self,name, mbox = None):
"""Add a new mailbox to this account
@type name: C{str}
@param name: The name associated with this mailbox. It may not
contain multiple hierarchical parts.
@type mbox: An object implementing C{IMailbox}
@param mbox: The mailbox to associate with this name. If C{None},
a suitable default is created and used.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added for
some reason. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.addMailbox"
raise imap4.MailboxException("Permision denied - addMailbox function not yet implimented")
    def create(self,path):
        """Create a new mailbox from the given hierarchical name.
        @type path: C{str}
        @param path: The full hierarchical name of a new mailbox to create.
        If any of the inferior hierarchical names to this one do not exist,
        they are created as well.
        @rtype: C{Deferred} or C{bool}
        @return: A true value if the creation succeeds, or a deferred whose
        callback will be invoked when the creation succeeds.
        @raise MailboxException: Raised if this mailbox cannot be added.
        This may also be raised asynchronously, if a C{Deferred} is
        returned.
        """
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.create(%s)" % path
        #raise exception if mailbox exists
        # (has_key is the Python 2 idiom used throughout this file)
        if self.server.mailboxCache.has_key(path):
            raise imap4.MailboxException("Mailbox '%s' already exists" % path)
        # Forward CREATE upstream; success/failure handled by the chained
        # callbacks below.
        d = self.server.protocol.create(path)
        d.addCallback(self.__create_cb,path)
        d.addErrback(self.__create_err,path)
        return d #returns Deferred
    def __create_cb(self, result, path):
        "deferred CREATE cmd succeeds - return True"
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyAccount.__create_cb('%s')" % path
            print result
        return True
    def __create_err(self, result, path):
        "deferred CREATE cmd failed - raise MailboxException for the client"
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyAccount.__create_err('%s')" % path
            print result
        raise imap4.MailboxException("Server could not create mailbox")
    def select(self,name, rw = True):
        """Acquire a mailbox, given its name.
        @type name: C{str}
        @param name: The mailbox to acquire
        @type rw: C{bool}
        @param rw: If a true value, request a read-write version of this
        mailbox. If a false value, request a read-only version.
        @rtype: Any object implementing C{IMailbox} or C{Deferred}
        @return: The mailbox object, or a C{Deferred} whose callback will
        be invoked with the mailbox object. None may be returned if the
        specified mailbox may not be selected for any reason.
        """
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.select(%s)" % name
        # Read-write access maps to IMAP SELECT; read-only maps to EXAMINE.
        if rw:
            d = self.server.protocol.select(name)
            d.addCallback(self.__select_cb,name,rw)
        else: #inspect command used for non read write look at mailbox
            d = self.server.protocol.examine(name)
            d.addCallback(self.__select_cb,name,rw)
        return d
def __select_cb(self,boxinfo,path,rw):
"Return Selected box and register it as the selected box in the account"
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__select_cb('%s')" % path
print boxinfo
boxinfo['PATH'] = path #adds path value to boxinfo for mailbox object creation/storgage
box = self._getBox(boxinfo)
#set selected on server if read write version requested/selected
if rw: self.server.selected = path
return box
def delete(self,name):
"""Delete the mailbox with the specified name.
@type name: C{str}
@param name: The mailbox to delete.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully deleted, or a
C{Deferred} whose callback will be invoked when the deletion
completes.
@raise MailboxException: Raised if this mailbox cannot be deleted.
This may also be raised asynchronously, if a C{Deferred} is returned.
"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.delete('%s')" % name
d = self.server.protocol.delete(name)
d.addCallback(self.__delete_cb,name)
d.addErrback(self.__delete_err,name)
return d #returns Deferred
def __delete_cb(self,result,name):
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.delete_cb('%s')" % name
print result
#remove proxy references
if self.server.mailboxCache.has_key(name): del self.maiboxCache[name]
for i in range(self.server.subscribed.count(name)): self.server.subscribed.count.remove(name)
if self.server.selected == name: self.server.selected = None
if self.selected == name: self.selected = None
return True
def __delete_err(self,reason,name):
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__delete_err('%s')" % name
print reason
raise imap4.MailboxException("Unable to delete mailbox: %s" % name)
def rename(self,oldname, newname):
"""Rename a mailbox
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to associate with the mailbox.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully renamed, or a
C{Deferred} whose callback will be invoked when the rename operation
is completed.
@raise MailboxException: Raised if this mailbox cannot be
renamed. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.rename('%s','%s')" % (oldname, newname)
d = self.server.protocol.rename(oldname,newname)
d.addCallback(self.__rename_cb,oldname,newname)
d.addErrback(self.__rename_err,oldname,newname)
return d #returns Deferred
def __rename_cb(self,result,oldname,newname):
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.rename_cb('%s','%s')" % (oldname,newname)
print result
#remove proxy references
if self.server.mailboxCache.has_key(oldname):
self.server.mailboxCache[newname] = self.maiboxCache[oldname]
del self.server.maiboxCache[oldname]
self.server.mailboxCache[newname].rename(newname)
for i in range(self.server.subscribed.count(oldname)):
self.server.subscribed.remove(oldname)
self.server.subscribed.append(newname)
if self.selected == oldname: self.selected = newname
if self.server.selected == oldname: self.server.selected = newname
return True
def __renameFailed(self,reason,oldname,newname):
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__delete_err('%s','%s')" % (oldname, newname)
print reason
except_string = "Unable to rename mailbox %s to %s" % (oldname, newname)
raise imap4.MailboxException(except_string)
def isSubscribed(self,name):
"""Check the subscription status of a mailbox
@type name: C{str}
@param name: The name of the mailbox to check
@rtype: C{Deferred} or C{bool}
@return: A true value if the given mailbox is currently subscribed
to, a false value otherwise. A C{Deferred} may also be returned
whose callback will be invoked with one of these values.
"""
#Currently just checks subscriptions in memory with no call to server
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.isSubscribed(%s)" % name
result = self.server.subscribed.count(name)
if ALLVERBOSE or VERBOSE:
print self.server.subscribed
if result: print "SUBSCRIBED = TRUE"
else: print "SUBSCRIBED = FALSE"
if result: return True
else: return False
""" Deffered version of is_subscribed
#though specified in docs - deffered is never called back
#Currently just checks subscriptions in memory with no call to server
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.isSubscribed(%s)" % name
d = self.server.protocol.lsub("",name)
d.addCallback(self.__isSubscribed_cb,name)
d.addErrback(self.__isSubscribed_err,name)
return d
"""
def __isSubscribed_cb(self,results,name):
"""LSUB returned successfully - if specified box found - update/create it and return true"""
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.isSubscribed(%s)" % name
results
#results come in as a list of lists - each row contains several mailbox parameters
#boxinfo[2] is the mailbox name/path
for boxinfo in results:
result_name = boxinfo[2]
if result_name == name:
dummy = self._getBox(boxinfo) #creates box and caches it locally
if ALLVERBOSE or VERBOSE: print "TRUE"
return True
if ALLVERBOSE or VERBOSE: print "FALSE"
return False
def __isSubscribed_err(self,results,name):
"""LSUB failed - print results for debugging purposes - return False"""
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.isSubscribed_err(%s)" % name
results
return False
def subscribe(self,name):
"""Subscribe to a mailbox
@type name: C{str}
@param name: The name of the mailbox to subscribe to
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is subscribed to successfully,
or a Deferred whose callback will be invoked with this value when
the subscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
subscribed to. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.subscribe(%s)" % name
d = self.server.protocol.subscribe(name)
d.addCallback(self.__subscribe_cb,name)
d.addErrback(self.__subscribe_err,name)
return d
    def __subscribe_cb(self,result,name):
        "deferred SUBSCRIBE cmd succeeds - records proxy subscription - returns True"
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyAccount.__subscribe_cb('%s')" % name
            print result
        # Record the subscription locally, avoiding duplicate entries.
        if not self.server.subscribed.count(name): self.server.subscribed.append(name)
        if ALLVERBOSE or VERBOSE: print self.server.subscribed
        return True
def __subscribe_err(self,result,name):
"deferred SUBSCRIBE cmd fails - makes sure proxy shows not subscribed - returns False"
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__subscribe_err('%s')" % name
print result
for i in range(self.server.subscribed.count(name)):
self.server.subscribed.remove(name)
raise imap4.MailboxException("Subrcribe failed for mailbox: %s" % name)
def unsubscribe(self,name):
"""Unsubscribe from a mailbox
@type name: C{str}
@param name: The name of the mailbox to unsubscribe from
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is unsubscribed from successfully,
or a Deferred whose callback will be invoked with this value when
the unsubscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
unsubscribed from. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
if ALLVERBOSE or VERBOSE: print "ImapProxyAccount.unsubscribe"
#unsubscribe from proxy cache
for i in range(self.server.subscribed.count(name)):
self.server.subscribed.remove(name)
#unsubscribe from server
d = self.server.protocol.unsubscribe(name)
d.addCallback(self.__unsubscribe_cb,name)
d.addErrback(self.__unsubscribe_err,name)
return d
def __unsubscribe_cb(self,result,name):
"deffered UNSUBSCRIBE cmd succeeds - remove from proxy subscribed list - return True"
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__unsubscribe_cb('%s')" % name
print result
return True
def __unsubscribe_err(self,result,name):
"deffered UNSUBSCRIBE cmd fails - raise exception"
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__unsubscribe_err('%s')" % name
print result
#raise exception with server results
raise imap4.MailboxException("Unsubscribe failed for mailbox '%s'" % name)
def listMailboxes(self,ref, wildcard):
"""List all the mailboxes that meet a certain criteria
@type ref: C{str}
@param ref: The context in which to apply the wildcard
@type wildcard: C{str}
@param wildcard: An expression against which to match mailbox names.
'*' matches any number of characters in a mailbox name, and '%'
matches similarly, but will not match across hierarchical boundaries.
@rtype: C{list} of C{tuple}
@return: A list of C{(mailboxName, mailboxObject)} which meet the
given criteria. C{mailboxObject} should implement either
C{IMailboxInfo} or C{IMailbox}. A Deferred may also be returned.
"""
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.listMailboxes('%s','%s')" % (ref, wildcard)
d = self.server.protocol.list(ref,wildcard)
d.addCallback(self.__listMailboxes_cb,ref,wildcard)
return d
def __listMailboxes_cb(self, results, ref, wildcard):
"returns list of two_tuples (maibox name,proxy mailbox object)"
if ALLVERBOSE or VERBOSE:
print "ImapProxyAccount.__listMailboxes_cb('%s','%s')" % (ref,wildcard)
print results
#results come in as a list of lists - each row contains several mailbox parameters
#boxinfo[2] is the mailbox name/path
return_list = []
for boxinfo in results:
name = boxinfo[2]
box = self._getBox(boxinfo) #creates box and caches it locally
two_tuple = (name,box)
return_list.append(two_tuple)
return return_list
    def _getBox(self,boxinfo):
        """
        Return a cached mailbox, creating or updating the cache entry.

        If ``boxinfo`` is a string it is taken as a mailbox path and the
        cached box is returned directly.  If it is a tuple (partial info
        from a LIST command) or a dict (full info from a SELECT command),
        the box is created and cached when new, or updated in place when
        it already exists, and then returned.

        @raise TypeError: if ``boxinfo`` is none of str/tuple/dict.
        """
        if ALLVERBOSE or VERBOSE: print "ImapProxyAccount._getBox"
        if isinstance(boxinfo,str): #box name/path only, as a str
            if ALLVERBOSE or VERBOSE: print boxinfo
            return self.server.mailboxCache[boxinfo]
        if isinstance(boxinfo,tuple): #partial info as tuple from LIST command
            boxname = boxinfo[2] #path string
        elif isinstance(boxinfo,dict): #full info as dict from SELECT command
            boxname = boxinfo['PATH']
        else: #unknown type
            raise TypeError("unknown mailbox info type")
        if ALLVERBOSE or VERBOSE:
            print boxname
            print boxinfo
        #If new box: create - if old box: update - then return box object
        if not self.server.mailboxCache.has_key(boxname):
            self.server.mailboxCache[boxname] = ImapProxyMailbox(boxinfo,self.server)
        else:
            self.server.mailboxCache[boxname].updateInfo(boxinfo)
        return self.server.mailboxCache[boxname]
|
{
"content_hash": "c976ecc6d9dd04a957afc887ea7ad0a8",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 121,
"avg_line_length": 44.5,
"alnum_prop": 0.6212461695607763,
"repo_name": "green-span/green-mail",
"id": "062fefef3eee3a95b102c77eee9db8f0259fbfc7",
"size": "19580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/imapproxyaccount.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "313064"
}
],
"symlink_target": ""
}
|
"""Utility classes and methods for use with simplejson and appengine.
Provides both a specialized simplejson encoder, GqlEncoder, designed to simplify
encoding directly from GQL results to JSON. A helper function, encode, is also
provided to further simplify usage.
GqlEncoder: Adds support for GQL results and properties to simplejson.
encode(input): Direct method to encode GQL objects as JSON.
"""
import datetime
from django.utils import simplejson #import simplejson
import time
#from django.db import models
class GqlEncoder(simplejson.JSONEncoder):
    """Extends JSONEncoder to add support for GQL results and properties.

    Adds support to simplejson JSONEncoders for GQL results and properties by
    overriding JSONEncoder's default method.
    """
    # TODO Improve coverage for all of App Engine's Property types.
    # NOTE(review): `db` is not imported in this module (the
    # google.appengine.ext.db import appears to be missing) - confirm.
    def default(self, obj):
        """Encode *obj* into a JSON-serializable structure.

        Handles objects providing __json__, GQL queries/models, datetime
        and struct_time values; anything else is delegated to the base
        class, which raises TypeError.
        """
        # Objects may opt in to JSON encoding by providing __json__().
        if hasattr(obj, '__json__'):
            return getattr(obj, '__json__')()
        if isinstance(obj, db.GqlQuery):
            return list(obj)
        elif isinstance(obj, db.Model):
            properties = obj.properties().items()
            output = {}
            for field, value in properties:
                output[field] = getattr(obj, field)
            return output
        elif isinstance(obj, datetime.datetime):
            output = {}
            fields = ['day', 'hour', 'microsecond', 'minute', 'month', 'second',
                'year']
            methods = ['ctime', 'isocalendar', 'isoformat', 'isoweekday',
                'timetuple']
            for field in fields:
                output[field] = getattr(obj, field)
            for method in methods:
                output[method] = getattr(obj, method)()
            # Also expose the value as seconds since the epoch.
            output['epoch'] = time.mktime(obj.timetuple())
            return output
        elif isinstance(obj, time.struct_time):
            return list(obj)
        # BUG FIX: the original called simplejson.JSONEncoder().default(self, obj),
        # constructing a throwaway encoder and passing *self* as the object to
        # encode; delegate properly so unsupported types raise TypeError.
        return simplejson.JSONEncoder.default(self, obj)
def encode(input):
    """Encode an input GQL object as JSON.

    Args:
      input: A GQL object or DB property.

    Returns:
      A JSON string based on the input object.

    Raises:
      TypeError: Typically occurs when an input object contains an
        unsupported type.
    """
    encoder = GqlEncoder()
    return encoder.encode(input)
|
{
"content_hash": "22ffb31d31bd42a099bad068467fc8ed",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 80,
"avg_line_length": 28.08974358974359,
"alnum_prop": 0.6713829301688726,
"repo_name": "ashleyjsands/think-mind-map-web-app",
"id": "0011c45e2ed0447bab4b1813894f6a05e59e52b0",
"size": "2817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "think_web_app/think/json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33409"
},
{
"name": "JavaScript",
"bytes": "322422"
},
{
"name": "Python",
"bytes": "627128"
}
],
"symlink_target": ""
}
|
from .treedict import TreeDict, getTree, treeExists, HashError
|
{
"content_hash": "c9f78f569d04756d319ce079461d921e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 62,
"avg_line_length": 32,
"alnum_prop": 0.8125,
"repo_name": "hoytak/treedict",
"id": "f58fcabe18ff769e54fe21d61cdc0120b19bcb66",
"size": "64",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treedict/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "402"
},
{
"name": "Python",
"bytes": "377116"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
}
|
import luigi, os, json
import os, requirements as reqq, simplejson, datetime, pickle, cPickle as pickle
from pypideps import PyPiDeps, _clean_requirements
class GetRepoRequirements(luigi.Task):
    """Luigi task that fetches a package's requirements from PyPI and
    writes them, one per line, to ./datastore/<package>/required."""
    package = luigi.Parameter()
    def run(self):
        deps = PyPiDeps(self.package)
        with self.output().open('w') as out_file:
            for requirement in deps.reqs:
                out_file.write(requirement + "\n")
    def output(self):
        # One requirements file per package under ./datastore.
        return luigi.LocalTarget('./datastore/%s/required' % self.package)
class GenerateRepoUtilisation(luigi.Task):
    """Luigi task that builds forward ('requires') and reverse
    ('required_by') dependency maps from ./datastore/<pkg>/required files
    and writes one JSON file per package to ./output, plus a top-25
    popularity file and a package list."""
    def run(self):
        # package -> list of its requirements
        output_requires = {}
        # requirement -> list of packages that depend on it
        output_required_by = {}
        known_packages = os.listdir("./datastore")
        for idx, package in enumerate(known_packages):
            print "working on [%s:%s]: %s" % (idx, len(known_packages), package)
            try:
                requirements = _clean_requirements( open("./datastore/%s/required" % package).read().split("\n") )
            except IOError:
                # Package has no 'required' file; skip it.
                continue
            output_requires[package] = requirements
            for r in requirements:
                if r in output_required_by:
                    output_required_by[r].append(package)
                else:
                    output_required_by[r] = [package]
        counter = 0
        for package, req_by in output_required_by.iteritems():
            counter = counter + 1
            print "writing [%s:%s]: %s" % (counter, len( output_required_by.keys() ), package)
            out = open('./output/%s.json' % package, 'w')
            try:
                out_req = output_requires[package]
            except KeyError:
                # Required by others but has no requirements file itself.
                out_req = []
            json_out = {
                'generated_at' : datetime.datetime.now(),
                'package' : package,
                'requires' : out_req,
                'required_by' : req_by
            }
            # date_handler serializes the datetime value above.
            out_content = simplejson.dumps(json_out, default=date_handler)
            out.write(out_content)
            out.close()
        #generate a file with the most popular packages (by reverse-dependency count)
        popfile = open('./output/_popular.json', 'w')
        popular_output = sorted( output_required_by.items(), key=lambda k: len(k[1]), reverse=True)
        popfile.write( json.dumps(popular_output[:25]) )
        popfile.close()
        package_list = open('./output/_packagelist.json', 'w')
        package_list.write( json.dumps( known_packages ) )
        package_list.close()
        #pickle.dump(output_required_by, open('output_required_by.pckl', 'w') )
        #pickle.dump(output_requires, open('output_requires.pckl', 'w') )
'''
requirements = _clean_requirements(requirements)
back_requires[p] = requirements
for r in requirements:
if required_by.has_key(r):
required_by[r].append(p)
else:
required_by[r] = [p]
for k,v in required_by.iteritems():
#out = open('./output/%s' % k, 'w')
try:
req_temp = back_requires[k]
except KeyError:
req_temp = []
json_out = {
'generated_at' : datetime.datetime.now(),
'package' : k,
'requires' : req_temp,
'required_by' : v
}
print simplejson.dumps(json_out, default=date_handler)
'''
def date_handler(obj):
    """JSON ``default`` hook: ISO-format date/datetime values, None otherwise."""
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    return None
|
{
"content_hash": "7202398c880c2e3bac04de21dc2624bc",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 114,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.5230217810862973,
"repo_name": "adamgilman/python-popular",
"id": "b301164d4e438e0cf9621c8a2ffad7a1f8cf9f86",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi_getinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11614"
}
],
"symlink_target": ""
}
|
"""Private module full of compatibility hacks.
Primarily this is for downstream redistributions of requests that unvendor
urllib3 without providing a shim.
.. warning::
This module is private. If you use it, and something breaks, you were
warned
"""
import sys
import requests
try:
from requests.packages.urllib3 import fields
from requests.packages.urllib3 import filepost
from requests.packages.urllib3 import poolmanager
except ImportError:
from urllib3 import fields
from urllib3 import filepost
from urllib3 import poolmanager
try:
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3 import connection
except ImportError:
try:
from urllib3.connection import HTTPConnection
from urllib3 import connection
except ImportError:
HTTPConnection = None
connection = None
if requests.__build__ < 0x020300:
timeout = None
else:
try:
from requests.packages.urllib3.util import timeout
except ImportError:
from urllib3.util import timeout
if requests.__build__ < 0x021000:
gaecontrib = None
else:
try:
from requests.packages.urllib3.contrib import appengine as gaecontrib
except ImportError:
from urllib3.contrib import appengine as gaecontrib
if requests.__build__ < 0x021200:
PyOpenSSLContext = None
else:
try:
from requests.packages.urllib3.contrib.pyopenssl \
import PyOpenSSLContext
except ImportError:
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
PyOpenSSLContext = None
PY3 = sys.version_info > (3, 0)
if PY3:
from collections.abc import Mapping, MutableMapping
import queue
from urllib.parse import urlencode, urljoin
else:
from collections import Mapping, MutableMapping
import Queue as queue
from urllib import urlencode
from urlparse import urljoin
try:
basestring = basestring
except NameError:
basestring = (str, bytes)
class HTTPHeaderDict(MutableMapping):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
    A ``dict`` like container for storing HTTP Headers.
    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.
    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.
    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """
    def __init__(self, headers=None, **kwargs):
        super(HTTPHeaderDict, self).__init__()
        # _container maps lowercased field name -> (original-cased name,
        # value[, value...]); a tuple for a single value, a list once
        # duplicates have been added via .add().
        self._container = {}
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)
    def __setitem__(self, key, val):
        # Overwrites any previous value(s) stored under the same
        # case-insensitive key.
        self._container[key.lower()] = (key, val)
        return self._container[key.lower()]
    def __getitem__(self, key):
        # Multiple stored values are joined with ', ' on retrieval.
        val = self._container[key.lower()]
        return ', '.join(val[1:])
    def __delitem__(self, key):
        del self._container[key.lower()]
    def __contains__(self, key):
        return key.lower() in self._container
    def __eq__(self, other):
        # Compare case-insensitively against any mapping-like object.
        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
            return False
        if not isinstance(other, type(self)):
            other = type(self)(other)
        return (dict((k.lower(), v) for k, v in self.itermerged()) ==
                dict((k.lower(), v) for k, v in other.itermerged()))
    def __ne__(self, other):
        return not self.__eq__(other)
    if not PY3:  # Python 2: provide the iterator methods removed in Python 3
        iterkeys = MutableMapping.iterkeys
        itervalues = MutableMapping.itervalues
    # Sentinel distinguishing "no default given" from an explicit None.
    __marker = object()
    def __len__(self):
        return len(self._container)
    def __iter__(self):
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]
    def pop(self, key, default=__marker):
        """D.pop(k[,d]) -> v, remove specified key and return its value.
        If key is not found, d is returned if given, otherwise KeyError is
        raised.
        """
        # Using the MutableMapping function directly fails due to the private
        # marker.
        # Using ordinary dict.pop would expose the internal structures.
        # So let's reinvent the wheel.
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def discard(self, key):
        # Like __delitem__ but silent when the key is absent.
        try:
            del self[key]
        except KeyError:
            pass
    def add(self, key, val):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.
        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        key_lower = key.lower()
        new_vals = key, val
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        if new_vals is not vals:
            # new_vals was not inserted, as there was a previous one
            if isinstance(vals, list):
                # If already several items got inserted, we have a list
                vals.append(val)
            else:
                # vals should be a tuple then, i.e. only one item so far
                # Need to convert the tuple to list for further extension
                self._container[key_lower] = [vals[0], vals[1], val]
    def extend(self, *args, **kwargs):
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "arguments ({} given)".format(len(args)))
        other = args[0] if len(args) >= 1 else ()
        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key in other:
                self.add(key, other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.add(key, other[key])
        else:
            for key, value in other:
                self.add(key, value)
        for key, value in kwargs.items():
            self.add(key, value)
    def getlist(self, key):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            return []
        else:
            if isinstance(vals, tuple):
                return [vals[1]]
            else:
                return vals[1:]
    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist
    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
    def _copy_from(self, other):
        # Copy another HTTPHeaderDict's internal storage, duplicating lists
        # so the two instances do not share mutable state.
        for key in other:
            val = other.getlist(key)
            if isinstance(val, list):
                # Don't need to convert tuples
                val = list(val)
            self._container[key.lower()] = [key] + val
    def copy(self):
        clone = type(self)()
        clone._copy_from(self)
        return clone
    def iteritems(self):
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val
    def itermerged(self):
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ', '.join(val[1:])
    def items(self):
        return list(self.iteritems())
    @classmethod
    def from_httplib(cls, message):  # Python 2
        """Read headers from a Python 2 httplib message object."""
        # python2.7 does not expose a proper API for exporting multiheaders
        # efficiently. This function re-reads raw lines from the message
        # object and extracts the multiheaders properly.
        headers = []
        for line in message.headers:
            if line.startswith((' ', '\t')):
                # Continuation line: fold it into the previous header value.
                key, value = headers[-1]
                headers[-1] = (key, value + '\r\n' + line.rstrip())
                continue
            key, value = line.split(':', 1)
            headers.append((key, value.strip()))
        return cls(headers)
__all__ = (
'basestring',
'connection',
'fields',
'filepost',
'poolmanager',
'timeout',
'HTTPHeaderDict',
'queue',
'urlencode',
'gaecontrib',
'urljoin',
'PyOpenSSLContext',
)
|
{
"content_hash": "0c0e17f7928e6c85566823bd4e8b82cc",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 30.53395061728395,
"alnum_prop": 0.5911250379055898,
"repo_name": "catapult-project/catapult",
"id": "622e77fa198e80cc4efdfdad2e8596c1b2c712f0",
"size": "9893",
"binary": false,
"copies": "15",
"ref": "refs/heads/main",
"path": "third_party/requests_toolbelt/requests_toolbelt/_compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
}
|
"""Test the Philips TV config flow."""
from unittest.mock import ANY, patch
from haphilipsjs import PairingFailure
from pytest import fixture
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.philips_js.const import CONF_ALLOW_NOTIFY, DOMAIN
from . import (
MOCK_CONFIG,
MOCK_CONFIG_PAIRED,
MOCK_PASSWORD,
MOCK_SYSTEM_UNPAIRED,
MOCK_USERINPUT,
MOCK_USERNAME,
)
from tests.common import MockConfigEntry
@fixture(autouse=True, name="mock_setup_entry")
def mock_setup_entry_fixture():
    """Disable component setup/unload so flows never start the integration."""
    # Patch both setup and unload; yield the setup mock so tests can assert
    # on how many times the integration would have been set up.
    with patch(
        "homeassistant.components.philips_js.async_setup_entry", return_value=True
    ) as mock_setup_entry, patch(
        "homeassistant.components.philips_js.async_unload_entry", return_value=True
    ):
        yield mock_setup_entry
@fixture
async def mock_tv_pairable(mock_tv):
    """Return a mock tv that is pairable (unpaired, secured transport, API v6)."""
    mock_tv.system = MOCK_SYSTEM_UNPAIRED
    mock_tv.pairing_type = "digest_auth_pairing"
    mock_tv.api_version = 6
    mock_tv.api_version_detected = 6
    mock_tv.secured_transport = True
    # A successful pair request followed by a grant returning credentials.
    mock_tv.pairRequest.return_value = {}
    mock_tv.pairGrant.return_value = MOCK_USERNAME, MOCK_PASSWORD
    return mock_tv
async def test_form(hass, mock_setup_entry):
    """Test a successful user flow creates a config entry."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    await hass.async_block_till_done()
    # Successful connection goes straight to entry creation.
    assert result2["type"] == "create_entry"
    assert result2["title"] == "Philips TV (1234567890)"
    assert result2["data"] == MOCK_CONFIG
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_tv):
    """Test we handle cannot connect error."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Simulate a TV that cannot be reached.
    mock_tv.system = None
    flow = await hass.config_entries.flow.async_configure(
        flow["flow_id"], MOCK_USERINPUT
    )
    assert flow["type"] == "form"
    assert flow["errors"] == {"base": "cannot_connect"}
async def test_form_unexpected_error(hass, mock_tv):
    """Test we handle unexpected exceptions."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any non-connection exception should surface as the "unknown" error.
    mock_tv.getSystem.side_effect = Exception("Unexpected exception")
    flow = await hass.config_entries.flow.async_configure(
        flow["flow_id"], MOCK_USERINPUT
    )
    assert flow["type"] == "form"
    assert flow["errors"] == {"base": "unknown"}
async def test_pairing(hass, mock_tv_pairable, mock_setup_entry):
    """Test a successful pairing flow creates a config entry."""
    mock_tv = mock_tv_pairable
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    # A pairable TV sends the flow on to the PIN entry form.
    assert result["type"] == "form"
    assert result["errors"] == {}
    mock_tv.setTransport.assert_called_with(True)
    mock_tv.pairRequest.assert_called()
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    # Entering the PIN completes pairing and creates the config entry.
    assert result == {
        "flow_id": ANY,
        "type": "create_entry",
        "description": None,
        "description_placeholders": None,
        "handler": "philips_js",
        "result": ANY,
        "title": "55PUS7181/12 (ABCDEFGHIJKLF)",
        "data": MOCK_CONFIG_PAIRED,
        "version": 1,
        "options": {},
    }
    await hass.async_block_till_done()
    assert len(mock_setup_entry.mock_calls) == 1
async def test_pair_request_failed(hass, mock_tv_pairable, mock_setup_entry):
    """Test the flow aborts when the initial pair request fails."""
    mock_tv = mock_tv_pairable
    mock_tv.pairRequest.side_effect = PairingFailure({})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    # A failed pair request aborts the whole flow.
    assert result == {
        "flow_id": ANY,
        "description_placeholders": {"error_id": None},
        "handler": "philips_js",
        "reason": "pairing_failure",
        "type": "abort",
    }
async def test_pair_grant_failed(hass, mock_tv_pairable, mock_setup_entry):
    """Test PIN validation errors and flow abort when pair grant fails."""
    mock_tv = mock_tv_pairable
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    # A pairable TV sends the flow on to the PIN entry form.
    assert result["type"] == "form"
    assert result["errors"] == {}
    mock_tv.setTransport.assert_called_with(True)
    mock_tv.pairRequest.assert_called()
    # Test with invalid pin: the flow stays on the form with a field error.
    mock_tv.pairGrant.side_effect = PairingFailure({"error_id": "INVALID_PIN"})
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    assert result["type"] == "form"
    assert result["errors"] == {"pin": "invalid_pin"}
    # Test with unexpected failure: the flow aborts entirely.
    mock_tv.pairGrant.side_effect = PairingFailure({})
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    assert result == {
        "flow_id": ANY,
        "description_placeholders": {"error_id": None},
        "handler": "philips_js",
        "reason": "pairing_failure",
        "type": "abort",
    }
async def test_options_flow(hass):
    """Test config flow options."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="123456",
        data=MOCK_CONFIG_PAIRED,
    )
    entry.add_to_hass(hass)

    assert await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    # The options flow opens on its "init" step.
    result = await hass.config_entries.options.async_init(entry.entry_id)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"

    # Submitting the form persists the selected option on the entry.
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_ALLOW_NOTIFY: True}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert entry.options == {CONF_ALLOW_NOTIFY: True}
|
{
"content_hash": "bc0fb4abfd824e1f7aef239f5e2d4beb",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 83,
"avg_line_length": 29.713043478260868,
"alnum_prop": 0.6391571553994733,
"repo_name": "rohitranjan1991/home-assistant",
"id": "ace6219511564bcb95ae897120eb44a2b83968c8",
"size": "6834",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/philips_js/test_config_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import ConfigParser
import time
################################### PART2 CLASS && FUNCTION ###########################
class InitializationAndLoadParameter(object):
    """Configure root logging (file plus console) and read settings from an INI file."""

    def __init__(self, log_data_dir):
        # Record start time so __del__ can report the instance's lifetime.
        self.start = time.clock()
        fmt = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s'
        logging.basicConfig(level=logging.INFO,
                            format=fmt,
                            datefmt='%y-%m-%d %H:%M:%S',
                            filename=log_data_dir,
                            filemode='a')
        # Mirror every INFO+ record to the console as well as the log file.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(fmt))
        logging.getLogger('').addHandler(console)
        logging.info("START CLASS %s.", InitializationAndLoadParameter.__name__)

    def __del__(self):
        # NOTE: messages kept verbatim from the original implementation; no
        # MySQL connection is actually managed by this class.
        logging.info("Success in quiting MySQL.")
        logging.info("END CLASS %s.", InitializationAndLoadParameter.__name__)
        self.end = time.clock()
        logging.info("The class %s run time is : %s seconds",
                     InitializationAndLoadParameter.__name__,
                     self.end - self.start)

    def load_parameter(self, config_data_dir):
        """Read every setting from the INI file at *config_data_dir*.

        Returns a tuple: (pyspark_app_name, log_data_dir, database_name,
        database_password, message_table_name, word_table_name,
        train_data_dir, test_data_dir, stopword_data_dir).
        """
        conf = ConfigParser.ConfigParser()
        conf.read(config_data_dir)

        # [basic] section
        pyspark_app_name = conf.get("basic", "pyspark_app_name")
        log_data_dir = conf.get("basic", "log_data_dir")
        logging.info("pyspark_app_name: %s", pyspark_app_name)
        logging.info("log_data_dir: %s", log_data_dir)

        # [database] section
        database_name = conf.get("database", "database_name")
        database_password = conf.get("database", "database_password")
        message_table_name = conf.get("database", "message_table_name")
        word_table_name = conf.get("database", "word_table_name")
        logging.info("database_name: %s", database_name)
        # NOTE: the password is logged in clear text, matching the original.
        logging.info("database_password: %s", database_password)
        logging.info("message_table_name: %s", message_table_name)
        logging.info("word_table_name: %s", word_table_name)

        # [data] section
        train_data_dir = conf.get("data", "train_data_dir")
        test_data_dir = conf.get("data", "test_data_dir")
        stopword_data_dir = conf.get("data", "stopword_data_dir")
        logging.info("train_data_dir: %s", train_data_dir)
        logging.info("test_data_dir: %s", test_data_dir)
        logging.info("stopword_data_dir: %s", stopword_data_dir)

        return (pyspark_app_name, log_data_dir, database_name,
                database_password, message_table_name, word_table_name,
                train_data_dir, test_data_dir, stopword_data_dir)
################################### PART3 CLASS TEST ##################################
"""
config_data_dir = "../config.ini"
ParameterLoader = InitializationAndLoadParameter(config_data_dir = config_data_dir)
appName, log_data_dir, database_name, database_password,\
message_table_name, word_table_name, train_data_dir,\
test_data_dir, stopword_data_dir = ParameterLoader.load_parameter(config_data_dir = config_data_dir)
"""
|
{
"content_hash": "5bdb2a74a983b20c3b72ce618026f83e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 178,
"avg_line_length": 50.726027397260275,
"alnum_prop": 0.6257088846880907,
"repo_name": "ysh329/spam-msg-classifier",
"id": "37745a1d7b4ed3e70fe835f770dec25b8950b591",
"size": "3993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myclass/class_initialization_and_load_parameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "139815"
}
],
"symlink_target": ""
}
|
from io import BytesIO
import numpy as np
import warnings
from .. import Variable
from ..conventions import cf_encoder
from ..core.pycompat import iteritems, basestring, unicode_type, OrderedDict
from ..core.utils import Frozen, FrozenOrderedDict
from ..core.indexing import NumpyIndexingAdapter
from .common import AbstractWritableDataStore
from .netcdf3 import (is_valid_nc3_name, coerce_nc3_dtype,
encode_nc3_attr_value, encode_nc3_variable)
from xray.conventions import cf_decoder
def _decode_string(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
return s
def _decode_attrs(d):
    """Decode attribute values from bytes to unicode.

    '_FillValue' is deliberately left untouched because its type must match
    the data exactly.
    """
    decoded = OrderedDict()
    for key, val in iteritems(d):
        decoded[key] = val if key == '_FillValue' else _decode_string(val)
    return decoded
class ScipyArrayWrapper(NumpyIndexingAdapter):
    """Lazy indexing adapter over one variable of an open scipy netcdf file."""

    def __init__(self, netcdf_file, variable_name):
        self.netcdf_file = netcdf_file
        self.variable_name = variable_name

    @property
    def array(self):
        # Look the variable up fresh on every access instead of caching it:
        # holding on to the netcdf_variable object (or its data array) makes
        # scipy complain about mmapped arrays still being referenced when the
        # dataset is closed before all data has been read.
        return self.netcdf_file.variables[self.variable_name].data

    @property
    def dtype(self):
        # Normalize to native endianness.
        source_dtype = self.array.dtype
        return np.dtype('%s%d' % (source_dtype.kind, source_dtype.itemsize))

    def __getitem__(self, key):
        result = super(ScipyArrayWrapper, self).__getitem__(key)
        # For mmapped files, copy the data so the returned array remains
        # readable after the backing file is closed, matching the behavior
        # of the netCDF4 library.
        return np.array(result, dtype=self.dtype,
                        copy=self.netcdf_file.use_mmap)
class ScipyDataStore(AbstractWritableDataStore):
    """Store for reading and writing data via scipy.io.netcdf.

    This store has the advantage of being able to be initialized with a
    StringIO object, allowing for serialization without writing to disk.

    It only supports the NetCDF3 file-format.
    """
    def __init__(self, filename_or_obj, mode='r', format=None, group=None,
                 writer=None, mmap=None):
        import scipy
        import scipy.io
        if mode != 'r':
            # Compare version components numerically: comparing the raw
            # version strings lexicographically is wrong (e.g. '0.9' > '0.13'
            # as strings, so old versions would never trigger the warning).
            try:
                scipy_version = tuple(
                    int(part) for part in scipy.__version__.split('.')[:2])
            except ValueError:
                # Unparseable (e.g. unusual dev build); skip the warning.
                scipy_version = (0, 13)
            if scipy_version < (0, 13):  # pragma: no cover
                warnings.warn('scipy %s detected; '
                              'the minimal recommended version is 0.13. '
                              'Older version of this library do not reliably '
                              'read and write files.'
                              % scipy.__version__, ImportWarning)
        if group is not None:
            raise ValueError('cannot save to a group with the '
                             'scipy.io.netcdf backend')
        # scipy.io.netcdf distinguishes the two NetCDF3 variants by a
        # version flag: 2 = 64-bit offset, 1 = classic.
        if format is None or format == 'NETCDF3_64BIT':
            version = 2
        elif format == 'NETCDF3_CLASSIC':
            version = 1
        else:
            raise ValueError('invalid format for scipy.io.netcdf backend: %r'
                             % format)
        # if filename is a NetCDF3 bytestring we store it in a StringIO
        if (isinstance(filename_or_obj, basestring)
                and filename_or_obj.startswith('CDF')):
            # TODO: this check has the unfortunate side-effect that
            # paths to files cannot start with 'CDF'.
            filename_or_obj = BytesIO(filename_or_obj)
        self.ds = scipy.io.netcdf_file(
            filename_or_obj, mode=mode, mmap=mmap, version=version)
        super(ScipyDataStore, self).__init__(writer)

    def store(self, variables, attributes):
        """CF-encode then store the given variables and attributes."""
        # All Scipy objects get CF encoded by default; without this,
        # attempting to write times, for example, would fail.
        cf_variables, cf_attrs = cf_encoder(variables, attributes)
        AbstractWritableDataStore.store(self, cf_variables, cf_attrs)

    def open_store_variable(self, name, var):
        """Wrap a scipy netcdf variable as a Variable with lazy data access."""
        return Variable(var.dimensions, ScipyArrayWrapper(self.ds, name),
                        _decode_attrs(var._attributes))

    def get_variables(self):
        """Return a frozen mapping of all variables in the file."""
        return FrozenOrderedDict((k, self.open_store_variable(k, v))
                                 for k, v in iteritems(self.ds.variables))

    def get_attrs(self):
        """Return the file's global attributes, decoded to unicode."""
        return Frozen(_decode_attrs(self.ds._attributes))

    def get_dimensions(self):
        """Return the file's dimensions as a frozen mapping."""
        return Frozen(self.ds.dimensions)

    def set_dimension(self, name, length):
        """Create a new dimension; existing dimensions cannot be modified."""
        if name in self.dimensions:
            raise ValueError('%s does not support modifying dimensions'
                             % type(self).__name__)
        self.ds.createDimension(name, length)

    def _validate_attr_key(self, key):
        # NetCDF3 restricts which names are legal for attributes.
        if not is_valid_nc3_name(key):
            raise ValueError("Not a valid attribute name")

    def set_attribute(self, key, value):
        """Set a global attribute, NetCDF3-encoding the value first."""
        self._validate_attr_key(key)
        value = encode_nc3_attr_value(value)
        setattr(self.ds, key, value)

    def prepare_variable(self, name, variable):
        """Create the netcdf variable; return (scipy_var, data) for writing."""
        # TODO, create a netCDF3 encoder
        variable = encode_nc3_variable(variable)
        self.set_necessary_dimensions(variable)
        data = variable.data
        # nb. this still creates a numpy array in all memory, even though we
        # don't write the data yet; scipy.io.netcdf does not support
        # incremental writes.
        self.ds.createVariable(name, data.dtype, variable.dims)
        scipy_var = self.ds.variables[name]
        for k, v in iteritems(variable.attrs):
            self._validate_attr_key(k)
            setattr(scipy_var, k, v)
        return scipy_var, data

    def sync(self):
        """Flush pending writes through to the underlying scipy file."""
        super(ScipyDataStore, self).sync()
        self.ds.flush()

    def close(self):
        """Close the underlying scipy netcdf file."""
        self.ds.close()

    def __exit__(self, type, value, tb):
        self.close()
|
{
"content_hash": "4c1c385a541322912469179efedcd752",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 38.28481012658228,
"alnum_prop": 0.6263845263679947,
"repo_name": "kjordahl/xray",
"id": "872c55fe2ce9602942c3b296e1d9b1a26f8a0988",
"size": "6049",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "xray/backends/scipy_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "684347"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
from asyncio import get_event_loop, set_event_loop_policy
from pathlib import Path
from cyrandom import uniform, randint, choice
from argparse import ArgumentParser
# uvloop is an optional, faster event-loop implementation; fall back to the
# stdlib loop when it is not installed.
try:
    from uvloop import EventLoopPolicy
    set_event_loop_policy(EventLoopPolicy())
except ImportError:
    pass
import time
import logging
import sys
# Make the repository root importable so the `monocle` package can be found
# when this script is run from its own directory.
monocle_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(monocle_dir))
from monocle import names, sanitized as conf
parser = ArgumentParser()
parser.add_argument(
    '-i', '--id',
    type=int,
    help='Pokémon ID to notify about'
)
parser.add_argument(
    '-lat', '--latitude',
    type=float,
    help='latitude for fake spawn'
)
parser.add_argument(
    '-lon', '--longitude',
    type=float,
    help='longitude for fake spawn'
)
parser.add_argument(
    '-r', '--remaining',
    type=int,
    help='seconds remaining on fake spawn'
)
parser.add_argument(
    '-u', '--unmodified',
    action='store_true',
    help="don't add ID to ALWAYS_NOTIFY_IDS"
)
args = parser.parse_args()
# Pick the Pokémon to fake: the requested ID, or a random one.
# ID 0 is not a real Pokémon, so give it a placeholder name.
if args.id is not None:
    pokemon_id = args.id
    if args.id == 0:
        names.POKEMON[0] = 'Test'
else:
    pokemon_id = randint(1, 387)
# NOTE(review): the config patches below must stay *before* the
# monocle.notification import — the import order appears deliberate so the
# Notifier sees the patched values; confirm before reordering.
if not args.unmodified:
    conf.ALWAYS_NOTIFY_IDS = {pokemon_id}
conf.HASHTAGS = {'test'}
from monocle.notification import Notifier
from monocle.shared import SessionManager
from monocle.names import MOVES
# Send all log output (DEBUG and above) to stdout.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
root.addHandler(ch)
# Rebind MOVES from the imported dict to a tuple of its keys so `choice`
# can pick a random move below.
MOVES = tuple(MOVES.keys())
# Use the provided coordinates/remaining time, or randomize within the
# configured map bounds.
if args.latitude is not None:
    lat = args.latitude
else:
    lat = uniform(conf.MAP_START[0], conf.MAP_END[0])
if args.longitude is not None:
    lon = args.longitude
else:
    lon = uniform(conf.MAP_START[1], conf.MAP_END[1])
if args.remaining:
    tth = args.remaining
else:
    tth = uniform(89, 3599)
now = time.time()
# Fabricate one sighting dict shaped like what the scanner would produce.
pokemon = {
    'encounter_id': 93253523,
    'spawn_id': 3502935,
    'pokemon_id': pokemon_id,
    'time_till_hidden': tth,
    'lat': lat,
    'lon': lon,
    'individual_attack': randint(0, 15),
    'individual_defense': randint(0, 15),
    'individual_stamina': randint(0, 15),
    'seen': now,
    'move_1': choice(MOVES),
    'move_2': choice(MOVES),
    'valid': True,
    'expire_timestamp': now + tth
}
notifier = Notifier()
loop = get_event_loop()
# Fire a single notification and report the overall result.
if loop.run_until_complete(notifier.notify(pokemon, randint(1, 2))):
    print('Success')
else:
    print('Failure')
SessionManager.close()
loop.close()
|
{
"content_hash": "437618076c3ac91ca2841b3aea4f6768",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 68,
"avg_line_length": 21.367521367521366,
"alnum_prop": 0.6712,
"repo_name": "sebast1219/Monocle",
"id": "23ee344f98af491d15b46b95d064d9e2ba8b2d9e",
"size": "2525",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "scripts/test_notifications.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3189"
},
{
"name": "HTML",
"bytes": "24630"
},
{
"name": "JavaScript",
"bytes": "17996"
},
{
"name": "PLpgSQL",
"bytes": "1059"
},
{
"name": "Python",
"bytes": "377347"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Project URLconf.  `patterns('', ...)` is the legacy (pre-Django-1.8) way
# of declaring url patterns.
urlpatterns = patterns('',
    # Site root: render the static base.html template directly.
    url(r'^$', TemplateView.as_view(template_name='base.html')),
    # Examples:
    # url(r'^$', 'icecreamratings_project.views.home', name='home'),
    # url(r'^icecreamratings_project/', include('icecreamratings_project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "ebbd301790d5456f41394de4a112588c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 85,
"avg_line_length": 35.65,
"alnum_prop": 0.7096774193548387,
"repo_name": "tayyabano1/icecreamratings_project",
"id": "f4acae3fd796f7a924ca7acc1f91cf439ef6a6ae",
"size": "713",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "icecreamratings_project/icecream/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "22233"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from sys import stdin
from src.classes.sockets import Socket
__author__ = 'Aldo Roman Nurena'
def __main__():
    """Script entry point: run the interactive client loop."""
    client_socket()
def client_socket():
    """
    Receive text from user and sends to main server.
    :return:
    """
    # NOTE: Python 2 file — the statement form of print below confirms it.
    print "Write some text: (line by line)"
    while True:
        # NOTE(review): a connection is opened and closed for every input
        # line.  Socket.get_instance() suggests a singleton, so re-connecting
        # an already-closed socket may fail — confirm against
        # src.classes.sockets before relying on this loop.
        s = Socket.get_instance()
        s.connect(('localhost',1234))
        line = stdin.readline()
        # s.send(line.length)
        s.send(line)
        response = s.recv(64)
        print(response)
        s.close()
        # Sentinel: typing "EOF" on its own line stops the loop (the sentinel
        # line itself is still sent to the server first).
        if line.strip() == "EOF":
            break
__main__()
|
{
"content_hash": "81c92720ee6c45248862dd7568610b2a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 52,
"avg_line_length": 17.939393939393938,
"alnum_prop": 0.5489864864864865,
"repo_name": "aldo-roman/python_samples",
"id": "ce0c66be1dc5c571bd426c8715cf1eb509352015",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/client_socket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4259"
}
],
"symlink_target": ""
}
|
"""
Copied from Chrome's src/tools/valgrind/memcheck/PRESUBMIT.py
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import os
import re
import sys
def CheckChange(input_api, output_api):
  """Checks the memcheck suppressions files for bad data.

  Scans every affected suppressions*.txt file line by line, validating
  suppression names (no duplicates, no placeholder names) and the basic
  Memcheck suppression syntax.  Returns a list with a single
  PresubmitError wrapping all collected messages, or an empty list.
  """
  # Add the path to the Chrome valgrind dir to the import path:
  tools_vg_path = os.path.join(input_api.PresubmitLocalPath(), '..', '..',
                               'valgrind')
  sys.path.append(tools_vg_path)
  import suppressions

  sup_regex = re.compile('suppressions.*\.txt$')
  # Maps suppression name -> (AffectedFile, line number) of its definition.
  # NOTE(review): this rebinding shadows the `suppressions` module imported
  # just above; the module is never used afterwards.
  suppressions = {}
  errors = []
  check_for_memcheck = False
  # skip_next_line has 3 possible values:
  # - False: don't skip the next line.
  # - 'skip_suppression_name': the next line is a suppression name, skip.
  # - 'skip_param': the next line is a system call parameter error, skip.
  skip_next_line = False
  for f in filter(lambda x: sup_regex.search(x.LocalPath()),
                  input_api.AffectedFiles()):
    for line, line_num in zip(f.NewContents(),
                              xrange(1, len(f.NewContents()) + 1)):
      line = line.lstrip()
      if line.startswith('#') or not line:
        continue
      if skip_next_line:
        if skip_next_line == 'skip_suppression_name':
          if 'insert_a_suppression_name_here' in line:
            errors.append('"insert_a_suppression_name_here" is not a valid '
                          'suppression name')
          if suppressions.has_key(line):
            # Duplicate suppression name.
            # NOTE(review): suppressions[line] stores (AffectedFile,
            # line_num), so comparing f.LocalPath() against
            # suppressions[line][1] (an int) looks always-False, and the
            # second message interpolates the file object rather than its
            # path.  Likely should store (f.LocalPath(), line_num) —
            # confirm against the upstream Chrome version of this script.
            if f.LocalPath() == suppressions[line][1]:
              errors.append('suppression with name "%s" at %s line %s '
                            'has already been defined at line %s' %
                            (line, f.LocalPath(), line_num,
                             suppressions[line][1]))
            else:
              errors.append('suppression with name "%s" at %s line %s '
                            'has already been defined at %s line %s' %
                            (line, f.LocalPath(), line_num,
                             suppressions[line][0], suppressions[line][1]))
          else:
            suppressions[line] = (f, line_num)
          check_for_memcheck = True;
        skip_next_line = False
        continue
      if check_for_memcheck:
        # The line right after a suppression name must declare the tool.
        if not line.startswith('Memcheck:'):
          errors.append('"%s" should be "Memcheck:..." in %s line %s' %
                        (line, f.LocalPath(), line_num))
        check_for_memcheck = False;
      if line == '{':
        skip_next_line = 'skip_suppression_name'
        continue
      if line == "Memcheck:Param":
        skip_next_line = 'skip_param'
        continue
      if (line.startswith('fun:') or line.startswith('obj:') or
          line.startswith('Memcheck:') or line == '}' or
          line == '...'):
        continue
      errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
                                                            line_num))
  if errors:
    return [output_api.PresubmitError('\n'.join(errors))]
  return []
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit hook for upload; runs the same checks as commit.
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit hook for commit; runs the same checks as upload.
  return CheckChange(input_api, output_api)
def GetPreferredTrySlaves():
  """Return the try slaves to use for this check.

  There are no memcheck slaves yet, so the list is empty; once some exist,
  their names belong here.
  """
  return []
|
{
"content_hash": "8ffc0d152cd805862eee17f4106878e8",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 38.8,
"alnum_prop": 0.5735967926689576,
"repo_name": "wangscript/libjingle",
"id": "116f20d1188a0940263b9acd5e211feba71b60b0",
"size": "4952",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tools/valgrind-libjingle/memcheck/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "547136"
},
{
"name": "C++",
"bytes": "8636650"
},
{
"name": "HTML",
"bytes": "6973"
},
{
"name": "Java",
"bytes": "115974"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Objective-C",
"bytes": "5890"
},
{
"name": "Objective-C++",
"bytes": "15753"
},
{
"name": "Python",
"bytes": "107568"
},
{
"name": "Shell",
"bytes": "2130"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals, division
import gzip as _gzip
__all__ = ["gzip"]
def gzip(original, compressed, *gzip_args, **gzip_kwargs):
    """Compress the file at *original* into a gzip file at *compressed*.

    Any extra positional or keyword arguments are forwarded to
    ``gzip.open`` (e.g. ``compresslevel``).
    """
    # ``with`` guarantees both handles are closed even if writing fails,
    # replacing the manual try/finally bookkeeping.
    with open(original, "rb") as orig:
        with _gzip.open(compressed, "wb", *gzip_args, **gzip_kwargs) as comp:
            comp.writelines(orig)
|
{
"content_hash": "500fdab4d140c35f6f77403fb671f623",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 23,
"alnum_prop": 0.5697940503432495,
"repo_name": "za-creature/gulpless",
"id": "71deffa7b220c7ee4464e058544111e6ae075db8",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gulpless/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33369"
}
],
"symlink_target": ""
}
|
first_name = ['Helena', 'Ben', 'Jake', 'Bella', 'Viktor', 'Ville', 'Molly']
last_name = ["O'Donnell", "Flygare", ]
# Print every first/last name combination (no separator between the parts).
for first in first_name:
    for last in last_name:
        print(first+last)
# NOTE(review): `is` tests object identity, and the `first_name` list is
# never the same object as this tuple literal, so the condition is always
# False and the blank-line print never runs.  The intended check was
# probably membership or equality -- confirm intent before changing it.
if first_name is ('Helena', 'Viktor'):
    print()
#fam_names = ('Ben', 'Helena', 'Jake', 'Bella', 'Viktor', 'Ville', 'Molly')
#for names in fam_names:
#    print("Hi there {}, how are you {}??".format(names, names))
|
{
"content_hash": "a2fbd3df50d01244d755de0a767c7317",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 28.2,
"alnum_prop": 0.574468085106383,
"repo_name": "BIMobject-Ben/test",
"id": "87d51065e6917ac6e9c06dcfc6eb23964250174a",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Learing files/labs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28070"
}
],
"symlink_target": ""
}
|
"""
Example ussage of the read_multi_vars function
This was tested against a S7-319 CPU
"""
import ctypes
import snap7
from snap7.common import check_error
from snap7.types import S7DataItem, S7AreaDB, S7WLByte
client = snap7.client.Client()
client.connect('10.100.5.2', 0, 2)
data_items = (S7DataItem * 3)()
data_items[0].Area = ctypes.c_int32(S7AreaDB)
data_items[0].WordLen = ctypes.c_int32(S7WLByte)
data_items[0].Result = ctypes.c_int32(0)
data_items[0].DBNumber = ctypes.c_int32(200)
data_items[0].Start = ctypes.c_int32(16)
data_items[0].Amount = ctypes.c_int32(4) # reading a REAL, 4 bytes
data_items[1].Area = ctypes.c_int32(S7AreaDB)
data_items[1].WordLen = ctypes.c_int32(S7WLByte)
data_items[1].Result = ctypes.c_int32(0)
data_items[1].DBNumber = ctypes.c_int32(200)
data_items[1].Start = ctypes.c_int32(12)
data_items[1].Amount = ctypes.c_int32(4) # reading a REAL, 4 bytes
data_items[2].Area = ctypes.c_int32(S7AreaDB)
data_items[2].WordLen = ctypes.c_int32(S7WLByte)
data_items[2].Result = ctypes.c_int32(0)
data_items[2].DBNumber = ctypes.c_int32(200)
data_items[2].Start = ctypes.c_int32(2)
data_items[2].Amount = ctypes.c_int32(2) # reading an INT, 2 bytes
# create buffers to receive the data
# use the Amount attribute on each item to size the buffer
for di in data_items:
# create the buffer
buffer = ctypes.create_string_buffer(di.Amount)
# cast the pointer to the buffer to the required type
pBuffer = ctypes.cast(ctypes.pointer(buffer),
ctypes.POINTER(ctypes.c_uint8))
di.pData = pBuffer
result, data_items = client.read_multi_vars(data_items)
for di in data_items:
check_error(di.Result)
result_values = []
# function to cast bytes to match data_types[] above
byte_to_value = [util.get_real, util.get_real, util.get_int]
# unpack and test the result of each read
for i in range(0, len(data_items)):
btv = byte_to_value[i]
di = data_items[i]
value = btv(di.pData, 0)
result_values.append(value)
print(result_values)
client.disconnect()
client.destroy()
|
{
"content_hash": "f107168758413286ddfb44b9c927b102",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 67,
"avg_line_length": 30.308823529411764,
"alnum_prop": 0.7083939835031539,
"repo_name": "gijzelaerr/python-snap7",
"id": "fd7c2e31c09ca789d752aa0db946c42bbaa6a09e",
"size": "2061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/read_multi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "288"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "Python",
"bytes": "267248"
}
],
"symlink_target": ""
}
|
"""
Support for myStrom switches.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.mystrom/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-mystrom==0.3.8']
DEFAULT_NAME = 'myStrom Switch'
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Find and return myStrom switch."""
    from pymystrom import MyStromPlug, exceptions

    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)

    # Probe the device once up front so an unreachable host fails setup early.
    try:
        MyStromPlug(host).get_status()
    except exceptions.MyStromConnectionError:
        _LOGGER.error("No route to device '%s'", host)
        return False

    add_devices([MyStromSwitch(name, host)])
class MyStromSwitch(SwitchDevice):
    """Representation of a myStrom switch."""

    def __init__(self, name, resource):
        """Initialize the myStrom switch."""
        from pymystrom import MyStromPlug

        self._name = name
        self._resource = resource
        self.data = {}
        self.plug = MyStromPlug(self._resource)
        # Fetch initial state so properties are valid immediately.
        self.update()

    def _log_unreachable(self):
        """Log that the device could not be reached."""
        _LOGGER.error("No route to device '%s'. Is device offline?",
                      self._resource)

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return bool(self.data['relay'])

    @property
    def current_power_w(self):
        """Return the current power consumption in W."""
        return round(self.data['power'], 2)

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        from pymystrom import exceptions

        try:
            self.plug.set_relay_on()
        except exceptions.MyStromConnectionError:
            self._log_unreachable()

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        from pymystrom import exceptions

        try:
            self.plug.set_relay_off()
        except exceptions.MyStromConnectionError:
            self._log_unreachable()

    def update(self):
        """Get the latest data from the device and update the data."""
        from pymystrom import exceptions

        try:
            self.data = self.plug.get_status()
        except exceptions.MyStromConnectionError:
            # Fall back to a safe "off, no power" state when unreachable.
            self.data = {'power': 0, 'relay': False}
            self._log_unreachable()
|
{
"content_hash": "49d86498d8eae244207dc05e71ccb506",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 75,
"avg_line_length": 29.938144329896907,
"alnum_prop": 0.6260330578512396,
"repo_name": "shaftoe/home-assistant",
"id": "e813da43dfae8fdac3d6111808bfef1af3d7bb15",
"size": "2904",
"binary": false,
"copies": "16",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/mystrom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5479272"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15017"
}
],
"symlink_target": ""
}
|
"""Sparse Tensor Representation. See the @{$python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_slice
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_max
@@sparse_reduce_max_sparse
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
  """Convert `sp_input` to `SparseTensor` and return it.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` converted to `SparseTensor`.

  Raises:
    TypeError: if `sp_input` is neither `SparseTensor` nor
      `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if not isinstance(sp_input, sparse_tensor.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")
  return sp_input
def _convert_to_sparse_tensors(sp_inputs):
  """Convert `sp_inputs` to `SparseTensor` objects and return them.

  Args:
    sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
      objects.

  Returns:
    `sp_inputs` converted to `SparseTensor` objects.  Note that a `list`
    input yields a `list`, while a `tuple` input yields a generator
    expression that converts its items lazily.

  Raises:
    TypeError: if `sp_inputs` is neither a `list` nor a `tuple`, or if any
      item in it is neither `SparseTensor` nor `SparseTensorValue`.
  """
  if isinstance(sp_inputs, list):
    return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
  if isinstance(sp_inputs, tuple):
    return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
  raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
@tf_export("sparse_concat")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
It is assumed that each inputs is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
dimensions will be expand to be the largest among all inputs, and it is the
sum of the inputs sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaulted to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The shapes of the two operands must match: broadcasting is not supported.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
happens only for a positive value.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
b = _convert_to_sparse_tensor(b)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape, thresh))
# Attempt to get output_shape statically.
a.get_shape().assert_is_compatible_with(b.get_shape())
static_shape = array_ops.broadcast_static_shape(a.get_shape(),
b.get_shape())
if static_shape.is_fully_defined():
output_shape = static_shape.as_list()
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
a.dense_shape, b)
def _sparse_cross(inputs, name=None):
  """Generates sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

    * inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"
    * inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"
    * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `string`.
  """
  # Plain (non-hashed) cross; delegates to the shared implementation.
  return _sparse_cross_internal(inputs, hashed_output=False, name=name)
def _sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
  """Generates hashed sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

    * inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"
    * inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"
    * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    num_buckets: An `int` that is `>= 0`.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    hash_key: Integer hash_key that will be used by the `FingerprintCat64`
      function. If not given, will use a default key.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `int64`.
  """
  # Hashed cross; positional delegation to the shared implementation.
  return _sparse_cross_internal(inputs, True, num_buckets, hash_key, name)
# Default `hash_key` fed to the FingerprintCat64-based sparse_cross kernel
# when the caller does not supply one (see _sparse_cross_internal).
_DEFAULT_HASH_KEY = 0xDECAFCAFFE
def _sparse_cross_internal(inputs,
hashed_output=False,
num_buckets=0,
hash_key=None,
name=None):
"""See gen_sparse_ops.sparse_cross."""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(
isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor)
for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [
i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
]
dense_inputs = [
i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
internal_type = dtypes.int64
indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
hashed_output=hashed_output,
num_buckets=num_buckets,
hash_key=hash_key or _DEFAULT_HASH_KEY,
out_type=out_type,
internal_type=internal_type,
name=name)
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
def sparse_dense_cwise_add(sp_t, dense_t):
  """Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) Broadcasts the dense side to have the same shape as the sparse side,
      if eligible;
  (2) Then, only the dense values pointed to by the indices of the
      SparseTensor participate in the cwise addition.

  By the rules, the result is a logical SparseTensor with exactly the same
  indices and shape, but possibly with different non-zero values. The output
  of this Op is the resultant non-zero values.

  Args:
    sp_t: the SparseTensor operand.
    dense_t: the dense Tensor operand; must have the same dtype and a
      broadcast-compatible shape as `sp_t`.

  Returns:
    output: the SparseTensor output.
  """
  summed_values = gen_sparse_ops.sparse_dense_cwise_add(
      sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)
  # Indices and shape are unchanged; only the values differ.
  return sparse_tensor.SparseTensor(sp_t.indices, summed_values,
                                    sp_t.dense_shape)
@tf_export("sparse_reorder")
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (
gen_sparse_ops.sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
if sp_input.get_shape().is_fully_defined():
dense_shape = sp_input.get_shape().as_list()
else:
dense_shape = array_ops.identity(sp_input.dense_shape)
return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)
@tf_export("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If argument `shape` requests a `SparseTensor` with a different
number of elements than `sp_input`.
ValueError: If `shape` has more than one inferred (== -1) dimension.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
shape = math_ops.cast(shape, dtype=dtypes.int64)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
reshaped_shape_const = tensor_util.constant_value(shape)
if (reshaped_shape_const is not None and
sp_input.get_shape().is_fully_defined()):
num_implied = sum((dim == -1) for dim in reshaped_shape_const)
if num_implied > 1:
raise ValueError("At most one dimension can be inferred (-1). Found: %s"
% reshaped_shape_const)
original_reshaped_shape = list(reshaped_shape_const) # Copy.
in_shape_size = np.prod(sp_input.get_shape().as_list())
if num_implied:
implied_idx = original_reshaped_shape.index(-1)
non_implied_idx = (
original_reshaped_shape[:implied_idx] +
original_reshaped_shape[implied_idx + 1:])
reshaped_shape_const[implied_idx] = (
in_shape_size // np.prod(non_implied_idx))
reshaped_size = np.prod(reshaped_shape_const)
if reshaped_size != in_shape_size:
raise ValueError("Cannot reshape a tensor with %d elements to shape %s "
"(%d elements)." %
(in_shape_size, original_reshaped_shape,
reshaped_size))
reshaped_shape = reshaped_shape_const
return sparse_tensor.SparseTensor(reshaped_ind,
array_ops.identity(sp_input.values),
reshaped_shape)
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
  """Sentinel default value used to force keyword-only calling."""

  def __repr__(self):
    # Needed so generated documentation shows a short name rather than a
    # fully qualified module path.
    return "KeywordRequired()"
@tf_export("sparse_split")
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None,
num_split=None,
axis=None,
name=None,
split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`
each slice starting from 0:`shape[axis] % num_split` gets extra one
dimension. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
Args:
keyword_required: Python 2 standin for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (
gen_sparse_ops.sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
output_shapes[i]))
return sparse_tensors
@tf_export("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
"""Slice a `SparseTensor` based on the `start` and `size.
For example, if the input is
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
[ a ]
[b c ]
sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e ]
[ ]
Args:
sp_input: The `SparseTensor` to split.
start: 1-D. tensor represents the start of the slice.
size: 1-D. tensor represents the size of the slice.
name: A name for the operation (optional).
Returns:
A `SparseTensor` objects resulting from splicing.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
start = ops.convert_to_tensor(start, dtypes.int64)
size = ops.convert_to_tensor(size, dtypes.int64)
with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
start,
size,
name=name)
return sparse_tensor.SparseTensor(output_indices, output_values,
output_shape)
@tf_export("sparse_to_dense")
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops.sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse_reduce_max")
def sparse_reduce_max(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_max(x) ==> 3
tf.sparse_reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse_reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse_reduce_max(x, 1, keep_dims=True) ==> [[2], [3]]
tf.sparse_reduce_max(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_max(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
@tf_export("sparse_reduce_max_sparse")
def sparse_reduce_max_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_max_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_reduce_sum")
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
@tf_export("sparse_reduce_sum_sparse")
def sparse_reduce_sum_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
Indices must be without repeats. This is only
tested if validate_indices is True.
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
@tf_export("sparse_merge")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher-dimensions by simply providing a list for
both the sp_ids as well as the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
sp_ids: A single `SparseTensor` with `values` property of type `int32`
or `int64` or a Python list of such `SparseTensor`s or a list thereof.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
`sp_values` are already sorted. If so skip sorting, False by default
(optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
`Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
`vocab_size` is not a or list thereof and `sp_ids` is a list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
@tf_export("sparse_retain")
def sparse_retain(sp_input, to_retain):
  """Retains specified non-empty values within a `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  and `to_retain = [True, False, False, True]`, then the output will
  be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:

      [0, 1]: a
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor` with `N` non-empty elements.
    to_retain: A bool vector of length `N` with `M` true values.

  Returns:
    A `SparseTensor` with the same shape as the input and `M` non-empty
    elements corresponding to the true positions in `to_retain`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  to_retain = ops.convert_to_tensor(to_retain)

  # Static shape validation: the mask must be a vector whose length matches
  # the number of non-empty values, when both are known at graph build time.
  mask_shape = to_retain.get_shape()
  mask_shape.assert_has_rank(1)
  sp_input.values.get_shape()[0].merge_with(mask_shape[0])

  # Positions of the True entries select which indices/values survive.
  keep_positions = array_ops.reshape(array_ops.where(to_retain), [-1])
  kept_indices = array_ops.gather(sp_input.indices, keep_positions)
  kept_values = array_ops.gather(sp_input.values, keep_positions)
  dense_shape = array_ops.identity(sp_input.dense_shape)
  return sparse_tensor.SparseTensor(kept_indices, kept_values, dense_shape)
@tf_export("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
  """Resets the shape of a `SparseTensor` with indices and values unchanged.

  If `new_shape` is None, returns a copy of `sp_input` with its shape reset
  to the tight bounding box of `sp_input`. This will be a shape consisting of
  all zeros if sp_input has no values.

  If `new_shape` is provided, then it must be larger or equal in all dimensions
  compared to the shape of `sp_input`. When this condition is met, the returned
  SparseTensor will have its shape reset to `new_shape` and its indices and
  values unchanged from that of `sp_input.`

  For example:

  Consider a `sp_input` with shape [2, 3, 5]:

    [0, 0, 1]: a
    [0, 1, 0]: b
    [0, 2, 2]: c
    [1, 0, 3]: d

  - It is an error to set `new_shape` as [3, 7] since this represents a
    rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
    during graph construction (if both shapes are known) or an OpError during
    run time.

  - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
    equal in every dimension compared to the original shape [2, 3, 5].

  - On the other hand, setting new_shape as [2, 3, 4] is also an error: The
    third dimension is smaller than the original shape [2, 3, 5] (and an
    `InvalidArgumentError` will be raised).

  - If `new_shape` is None, the returned SparseTensor will have a shape
    [2, 3, 4], which is the tight bounding box of `sp_input`.

  Args:
    sp_input: The input `SparseTensor`.
    new_shape: None or a vector representing the new shape for the returned
      `SparseTensor`.

  Returns:
    A `SparseTensor` indices and values unchanged from `input_sp`. Its shape is
    `new_shape` if that is set. Otherwise it is the tight bounding box of
    `input_sp`

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If `new_shape` represents a tensor with a different rank from
      that of `sp_input` (if shapes are known when graph is constructed).
    ValueError: If `new_shape` is determined during graph build to have
      dimension sizes that are too small.
    OpError:
      - If `new_shape` has dimension sizes that are too small.
      - If shapes are not known during graph construction time, and during run
        time it is found out that the ranks do not match.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  in_indices = array_ops.identity(sp_input.indices)
  in_values = array_ops.identity(sp_input.values)
  in_shape = array_ops.identity(sp_input.dense_shape)

  if new_shape is None:
    # Tight bounding box: (max index + 1) per dimension, clamped at zero so
    # that an input with no values yields an all-zero shape.
    dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
    output_shape_tensor = math_ops.maximum(
        array_ops.constant(0, dtype=dtypes.int64),
        math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
  else:
    output_shape_tensor = ops.convert_to_tensor(new_shape)
    output_shape_tensor.get_shape().assert_has_rank(1)
    output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
    # For cases when shape is known during graph construction, this catches the
    # error before the sparse_tensor.SparseTensor catches it.
    output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])

    output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
    # For cases where all shapes are known during graph construction
    if (output_shape_tensor_const is not None and
        sp_input.get_shape().is_fully_defined()):
      in_shape_const = np.array(sp_input.get_shape().as_list())
      if not np.all(in_shape_const <= output_shape_tensor_const):
        # Bug fix: the requested shape is `output_shape_tensor_const` and the
        # input's shape is `in_shape_const`; the original code passed them in
        # the opposite order, mislabeling both values in the error message.
        raise ValueError(
            "Requested new_shape should have dimension sizes >= sp_input.shape."
            " Found new_shape (%s), sp_input.shape (%s)." %
            (output_shape_tensor_const, in_shape_const))
      output_shape_tensor = output_shape_tensor_const
    else:
      # For cases where shape is not known during graph construction:
      # validate rank equality and elementwise size dominance at run time.
      output_shape_tensor = control_flow_ops.with_dependencies([
          check_ops.assert_equal(
              array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
      ], output_shape_tensor)
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(in_shape, output_shape_tensor)],
          output_shape_tensor)

  return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
@tf_export("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  This op adds entries with the specified `default_value` at index
  `[row, 0]` for any row in the input that does not already have a value.

  For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  Note that the input may have empty columns at the end, with no effect on
  this op.

  The output `SparseTensor` will be in row-major order and will have the
  same shape as the input.

  This op also returns an indicator vector such that

      empty_row_indicator[i] = True iff row i was an empty row.

  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input.`
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
      rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether each
      input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
    default_value = ops.convert_to_tensor(
        default_value, dtype=sp_input.values.dtype)
    # The kernel also returns a reverse index map, which callers of this
    # Python wrapper do not need; it is discarded here.
    (filled_indices, filled_values, is_row_empty,
     unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
         indices=sp_input.indices,
         values=sp_input.values,
         dense_shape=sp_input.dense_shape,
         default_value=default_value)
    filled_sp = sparse_tensor.SparseTensor(
        indices=filled_indices,
        values=filled_values,
        dense_shape=sp_input.dense_shape)
    return filled_sp, is_row_empty
@tf_export("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A 3-vector (1-D `Tensor`), with each column representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  serialized = gen_sparse_ops.serialize_sparse(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      name=name, out_type=out_type)
  return serialized
@tf_export("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension. Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension. The serialized
  `SparseTensor` objects going into each row of the output `Tensor` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
    represents serialized `SparseTensor`'s indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  serialized = gen_sparse_ops.serialize_many_sparse(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      name=name, out_type=out_type)
  return serialized
def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize `SparseTensor` objects.

  The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
  the last dimension stores serialized `SparseTensor` objects and the other N
  dimensions (N >= 0) correspond to a batch. The ranks of the original
  `SparseTensor` objects must all match. When the final `SparseTensor` is
  created, its rank is the rank of the incoming `SparseTensor` objects plus N;
  the sparse tensors have been concatenated along new dimensions, one for each
  batch.

  The output `SparseTensor` object's shape values for the original dimensions
  are the max across the input `SparseTensor` objects' shape values for the
  corresponding dimensions. The new dimensions match the size of the batch.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the serialized input is a `[2 x 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: The serialized `SparseTensor` objects.
      The last dimension must have 3 columns.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor` objects.
  """
  indices, values, shape = gen_sparse_ops.deserialize_sparse(
      serialized_sparse, dtype, name=name)

  # Propagate the statically-known rank (if provided) back onto the outputs.
  # A `rank` of None leaves these dimensions unknown.
  indices.set_shape([None, rank])
  shape.set_shape([rank])

  return sparse_tensor.SparseTensor(indices, values, shape)
@tf_export("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `serialize_sparse`. The ranks of the original `SparseTensor` objects
  must all match. When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have been
  concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `sparse_reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
      The serialized and packed `SparseTensor` objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and type.
  """
  indices, values, shape = gen_sparse_ops.deserialize_many_sparse(
      serialized_sparse, dtype, name=name)

  # Propagate the statically-known rank (if provided) back onto the outputs.
  # A `rank` of None leaves these dimensions unknown.
  indices.set_shape([None, rank])
  shape.set_shape([rank])

  return sparse_tensor.SparseTensor(indices, values, shape)
@tf_export("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
                               b,
                               adjoint_a=False,
                               adjoint_b=False,
                               name=None):
  # pylint: disable=line-too-long
  """Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of `A`. However, the
  following input format is recommended for optimal behavior:

  * If `adjoint_a == false`: `A` should be sorted in lexicographically
    increasing order. Use `sparse_reorder` if you're not sure.
  * If `adjoint_a == true`: `A` should be sorted in order of increasing
    dimension 1 (i.e., "column major" order instead of "row major" order).

  Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:

  It's not obvious but you can consider `embedding_lookup_sparse` as another
  sparse and dense multiplication. In some situations, you may prefer to use
  `embedding_lookup_sparse` even though you're not dealing with embeddings.

  There are two questions to ask in the decision process: Do you need gradients
  computed as sparse too? Is your sparse data represented as two
  `SparseTensor`s: ids and values? There is more explanation about data format
  below. If you answer any of these questions as yes, consider using
  `tf.nn.embedding_lookup_sparse`.

  Following explains differences between the expected SparseTensors:

  For example if dense form of your sparse data has shape `[3, 5]` and values:

      [[  a      ]
       [b       c]
       [    d    ]]

  `SparseTensor` format expected by `sparse_tensor_dense_matmul`:
   `sp_a` (indices, values):

      [0, 1]: a
      [1, 0]: b
      [1, 4]: c
      [2, 2]: d

  `SparseTensor` format expected by `embedding_lookup_sparse`:
   `sp_ids`                 `sp_weights`

      [0, 0]: 1                [0, 0]: a
      [1, 0]: 0                [1, 0]: b
      [1, 1]: 4                [1, 1]: c
      [2, 0]: 2                [2, 0]: d

  Deciding when to use `sparse_tensor_dense_matmul` vs.
  `matmul`(a_is_sparse=True):

  There are a number of questions to ask in the decision process, including:

  * Will the SparseTensor `A` fit in memory if densified?
  * Is the column count of the product large (>> 1)?
  * Is the density of `A` larger than approximately 15%?

  If the answer to several of these questions is yes, consider
  converting the `SparseTensor` to a dense one and using `tf.matmul` with
  `a_is_sparse=True`.

  This operation tends to perform well when `A` is more sparse, if the column
  size of the product is small (e.g. matrix-vector multiplication), if
  `sp_a.dense_shape` takes on large values.

  Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
  labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'.  For
  purposes of the comparison, the time spent converting from a `SparseTensor` to
  a dense `Tensor` is not included, so it is overly conservative with respect to
  the time ratio.

  Benchmark system:
  CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
  GPU: NVidia Tesla k40c

  Compiled with:
  `-c opt --config=cuda --copt=-mavx`

  ```
  tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
  A sparse [m, k] with % nonzero values between 1% and 80%
  B dense [k, n]

  % nnz  n   gpu   m     k     dt(dense)     dt(sparse)   dt(sparse)/dt(dense)
  0.01   1   True  100   100   0.000221166   0.00010154   0.459112
  0.01   1   True  100   1000  0.00033858    0.000109275  0.322745
  0.01   1   True  1000  100   0.000310557   9.85661e-05  0.317385
  0.01   1   True  1000  1000  0.0008721     0.000100875  0.115669
  0.01   1   False 100   100   0.000208085   0.000107603  0.51711
  0.01   1   False 100   1000  0.000327112   9.51118e-05  0.290762
  0.01   1   False 1000  100   0.000308222   0.00010345   0.335635
  0.01   1   False 1000  1000  0.000865721   0.000101397  0.117124
  0.01   10  True  100   100   0.000218522   0.000105537  0.482958
  0.01   10  True  100   1000  0.000340882   0.000111641  0.327506
  0.01   10  True  1000  100   0.000315472   0.000117376  0.372064
  0.01   10  True  1000  1000  0.000905493   0.000123263  0.136128
  0.01   10  False 100   100   0.000221529   9.82571e-05  0.44354
  0.01   10  False 100   1000  0.000330552   0.000112615  0.340687
  0.01   10  False 1000  100   0.000341277   0.000114097  0.334324
  0.01   10  False 1000  1000  0.000819944   0.000120982  0.147549
  0.01   25  True  100   100   0.000207806   0.000105977  0.509981
  0.01   25  True  100   1000  0.000322879   0.00012921   0.400181
  0.01   25  True  1000  100   0.00038262    0.00014158   0.370035
  0.01   25  True  1000  1000  0.000865438   0.000202083  0.233504
  0.01   25  False 100   100   0.000209401   0.000104696  0.499979
  0.01   25  False 100   1000  0.000321161   0.000130737  0.407076
  0.01   25  False 1000  100   0.000377012   0.000136801  0.362856
  0.01   25  False 1000  1000  0.000861125   0.00020272   0.235413
  0.2    1   True  100   100   0.000206952   9.69219e-05  0.46833
  0.2    1   True  100   1000  0.000348674   0.000147475  0.422959
  0.2    1   True  1000  100   0.000336908   0.00010122   0.300439
  0.2    1   True  1000  1000  0.001022      0.000203274  0.198898
  0.2    1   False 100   100   0.000207532   9.5412e-05   0.459746
  0.2    1   False 100   1000  0.000356127   0.000146824  0.41228
  0.2    1   False 1000  100   0.000322664   0.000100918  0.312764
  0.2    1   False 1000  1000  0.000998987   0.000203442  0.203648
  0.2    10  True  100   100   0.000211692   0.000109903  0.519165
  0.2    10  True  100   1000  0.000372819   0.000164321  0.440753
  0.2    10  True  1000  100   0.000338651   0.000144806  0.427596
  0.2    10  True  1000  1000  0.00108312    0.000758876  0.70064
  0.2    10  False 100   100   0.000215727   0.000110502  0.512231
  0.2    10  False 100   1000  0.000375419   0.0001613    0.429653
  0.2    10  False 1000  100   0.000336999   0.000145628  0.432132
  0.2    10  False 1000  1000  0.00110502    0.000762043  0.689618
  0.2    25  True  100   100   0.000218705   0.000129913  0.594009
  0.2    25  True  100   1000  0.000394794   0.00029428   0.745402
  0.2    25  True  1000  100   0.000404483   0.0002693    0.665788
  0.2    25  True  1000  1000  0.0012002     0.00194494   1.62052
  0.2    25  False 100   100   0.000221494   0.0001306    0.589632
  0.2    25  False 100   1000  0.000396436   0.000297204  0.74969
  0.2    25  False 1000  100   0.000409346   0.000270068  0.659754
  0.2    25  False 1000  1000  0.00121051    0.00193737   1.60046
  0.5    1   True  100   100   0.000214981   9.82111e-05  0.456836
  0.5    1   True  100   1000  0.000415328   0.000223073  0.537101
  0.5    1   True  1000  100   0.000358324   0.00011269   0.314492
  0.5    1   True  1000  1000  0.00137612    0.000437401  0.317851
  0.5    1   False 100   100   0.000224196   0.000101423  0.452386
  0.5    1   False 100   1000  0.000400987   0.000223286  0.556841
  0.5    1   False 1000  100   0.000368825   0.00011224   0.304318
  0.5    1   False 1000  1000  0.00136036    0.000429369  0.31563
  0.5    10  True  100   100   0.000222125   0.000112308  0.505608
  0.5    10  True  100   1000  0.000461088   0.00032357   0.701753
  0.5    10  True  1000  100   0.000394624   0.000225497  0.571422
  0.5    10  True  1000  1000  0.00158027    0.00190898   1.20801
  0.5    10  False 100   100   0.000232083   0.000114978  0.495418
  0.5    10  False 100   1000  0.000454574   0.000324632  0.714146
  0.5    10  False 1000  100   0.000379097   0.000227768  0.600817
  0.5    10  False 1000  1000  0.00160292    0.00190168   1.18638
  0.5    25  True  100   100   0.00023429    0.000151703  0.647501
  0.5    25  True  100   1000  0.000497462   0.000598873  1.20386
  0.5    25  True  1000  100   0.000460778   0.000557038  1.20891
  0.5    25  True  1000  1000  0.00170036    0.00467336   2.74845
  0.5    25  False 100   100   0.000228981   0.000155334  0.678371
  0.5    25  False 100   1000  0.000496139   0.000620789  1.25124
  0.5    25  False 1000  100   0.00045473    0.000551528  1.21287
  0.5    25  False 1000  1000  0.00171793    0.00467152   2.71927
  0.8    1   True  100   100   0.000222037   0.000105301  0.47425
  0.8    1   True  100   1000  0.000410804   0.000329327  0.801664
  0.8    1   True  1000  100   0.000349735   0.000131225  0.375212
  0.8    1   True  1000  1000  0.00139219    0.000677065  0.48633
  0.8    1   False 100   100   0.000214079   0.000107486  0.502085
  0.8    1   False 100   1000  0.000413746   0.000323244  0.781261
  0.8    1   False 1000  100   0.000348983   0.000131983  0.378193
  0.8    1   False 1000  1000  0.00136296    0.000685325  0.50282
  0.8    10  True  100   100   0.000229159   0.00011825   0.516017
  0.8    10  True  100   1000  0.000498845   0.000532618  1.0677
  0.8    10  True  1000  100   0.000383126   0.00029935   0.781336
  0.8    10  True  1000  1000  0.00162866    0.00307312   1.88689
  0.8    10  False 100   100   0.000230783   0.000124958  0.541452
  0.8    10  False 100   1000  0.000493393   0.000550654  1.11606
  0.8    10  False 1000  100   0.000377167   0.000298581  0.791642
  0.8    10  False 1000  1000  0.00165795    0.00305103   1.84024
  0.8    25  True  100   100   0.000233496   0.000175241  0.75051
  0.8    25  True  100   1000  0.00055654    0.00102658   1.84458
  0.8    25  True  1000  100   0.000463814   0.000783267  1.68875
  0.8    25  True  1000  1000  0.00186905    0.00755344   4.04132
  0.8    25  False 100   100   0.000240243   0.000175047  0.728625
  0.8    25  False 100   1000  0.000578102   0.00104499   1.80763
  0.8    25  False 1000  100   0.000485113   0.000776849  1.60138
  0.8    25  False 1000  1000  0.00211448    0.00752736   3.55992
  ```

  Args:
    sp_a: SparseTensor A, of rank 2.
    b: A dense Matrix with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply.  If A is complex,
      this is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply.  If B is complex,
      this is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      `A = A.H if adjoint_a else A`
      `B = B.H if adjoint_b else B`
      `return A*B`
  """
  # pylint: enable=line-too-long
  # Accept raw (indices, values, shape) inputs as well as SparseTensors.
  sp_a = _convert_to_sparse_tensor(sp_a)
  with ops.name_scope(name, "SparseTensorDenseMatMul",
                      [sp_a.indices, sp_a.values, b]) as name:
    b = ops.convert_to_tensor(b, name="b")
    # The kernel consumes the SparseTensor as its three component tensors;
    # no validation of A's indices is performed (see docstring).
    return gen_sparse_ops.sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.dense_shape,
        b=b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b)
@tf_export("sparse_softmax")
def sparse_softmax(sp_input, name=None):
  """Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`), and with indices sorted in the canonical lexicographic
  order.

  This op is equivalent to applying the normal `tf.nn.softmax()` to each
  innermost logical submatrix with shape `[B, C]`, but with the catch that *the
  implicitly zero elements do not participate*.  Specifically, the algorithm is
  equivalent to:

    (1) Applies `tf.nn.softmax()` to a densified view of each innermost
        submatrix with shape `[B, C]`, along the size-C dimension;
    (2) Masks out the original implicitly-zero locations;
    (3) Renormalizes the remaining elements.

  Hence, the `SparseTensor` result has exactly the same non-zero indices and
  shape.

  Example:

  ```python
  # First batch:
  # [?   e.]
  # [1.  ? ]
  # Second batch:
  # [e   ? ]
  # [e   e ]
  shape = [2, 2, 2]  # 3-D SparseTensor
  values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
  indices = np.vstack(np.where(values)).astype(np.int64).T

  result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
  # ...returning a 3-D SparseTensor, equivalent to:
  # [?   1.]     [1    ?]
  # [1.  ? ] and [.5  .5]
  # where ? means implicitly zero.
  ```

  Args:
    sp_input: N-D `SparseTensor`, where `N >= 2`.
    name: optional name of the operation.

  Returns:
    output: N-D `SparseTensor` representing the results.
  """
  with ops.name_scope(name, "SparseSoftmax",
                      [sp_input.indices, sp_input.values]) as name:
    # The kernel only rewrites the values; indices and shape pass through.
    softmax_values = gen_sparse_ops.sparse_softmax(
        sp_input.indices, sp_input.values, sp_input.dense_shape)
    return sparse_tensor.SparseTensor(
        sp_input.indices, softmax_values, sp_input.dense_shape)
@tf_export("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
  """Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_maximum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMaximum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    max_indices, max_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices, sp_a.values, sp_a.dense_shape,
        sp_b.indices, sp_b.values, sp_b.dense_shape,
        name=name)
  # Both operands are assumed to share one shape; sp_a's is reused here.
  return sparse_tensor.SparseTensor(max_indices, max_values, sp_a.dense_shape)
@tf_export("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_minimum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMinimum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    min_indices, min_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices, sp_a.values, sp_a.dense_shape,
        sp_b.indices, sp_b.values, sp_b.dense_shape,
        name=name)
  # Both operands are assumed to share one shape; sp_a's is reused here.
  return sparse_tensor.SparseTensor(min_indices, min_values, sp_a.dense_shape)
@tf_export("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
  """Transposes a `SparseTensor`

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
  the rank of the input tensor. Hence by default, this operation performs a
  regular matrix transpose on 2-D input Tensors.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[5, 4]` and
  `indices` / `values`:

      [0, 2]: c
      [1, 0]: a
      [1, 3]: d
      [3, 0]: b

  Args:
    sp_input: The input `SparseTensor`.
    perm: A permutation of the dimensions of `sp_input`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A transposed `SparseTensor`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
    if perm is None:
      # Default: full reversal of the dimensions, i.e. (n-1, ..., 1, 0).
      rank = array_ops.rank(sp_input)
      perm = (rank - 1) - math_ops.range(0, rank, 1)

    # Permute the columns of the indices matrix according to `perm`.
    permuted_indices = array_ops.transpose(
        array_ops.gather(array_ops.transpose(sp_input.indices), perm))

    static_perm = tensor_util.constant_value(ops.convert_to_tensor(perm))
    if static_perm is not None and sp_input.get_shape().is_fully_defined():
      # Everything is known statically: permute the shape in Python.
      static_shape = sp_input.get_shape().as_list()
      permuted_shape = [static_shape[axis] for axis in static_perm]
    else:
      # Fall back to a runtime gather on the dynamic dense shape.
      permuted_shape = array_ops.gather(sp_input.dense_shape, perm)

    permuted_st = sparse_tensor.SparseTensor(
        permuted_indices, sp_input.values, permuted_shape)
    # Transposing breaks lexicographic index order; restore it.
    return sparse_reorder(permuted_st)
def _add_sparse_to_tensors_map(sp_input,
                               container=None,
                               shared_name=None,
                               name=None):
  """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.

  Args:
    sp_input: The input `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string 1-vector (1D `Tensor`), with the single element representing the
    a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
    underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  handle = gen_sparse_ops.add_sparse_to_tensors_map(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      container=container, shared_name=shared_name, name=name)
  return handle
def _add_many_sparse_to_tensors_map(sp_input,
                                    container=None,
                                    shared_name=None,
                                    name=None):
  """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension.  Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension.  The serialized
  `SparseTensor` objects going into each row of the output `Tensor` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string matrix (2-D `Tensor`) with `N` rows and `1` column.
    Each row represents a unique handle to a `SparseTensor` stored by
    the `SparseTensorMap` underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  handles = gen_sparse_ops.add_many_sparse_to_tensors_map(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      container=container, shared_name=shared_name, name=name)
  return handles
def _take_many_sparse_from_tensors_map(sparse_map_op,
                                       sparse_handles,
                                       rank=None,
                                       name=None):
  """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.

  The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
  must all match. When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have been
  concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `sparse_reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    sparse_map_op: The `Operation` that created the original handles.
      Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
    sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
      The serialized and packed `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and type.

  Raises:
    TypeError: If `sparse_map_op` is not an `Operation`, or is not one of
      the two map-insertion op types.
  """
  if not isinstance(sparse_map_op, ops.Operation):
    # Fix: message previously read "sparse_map_op be an Operation".
    raise TypeError("sparse_map_op must be an Operation")
  if sparse_map_op.type not in ("AddSparseToTensorsMap",
                                "AddManySparseToTensorsMap"):
    # Fix: message previously listed "AddSparseToTensorsMap" twice instead of
    # naming "AddManySparseToTensorsMap" as the second accepted op type.
    raise TypeError(
        "sparse_map_op must be one of AddSparseToTensorsMap or "
        "AddManySparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
  with ops.colocate_with(sparse_map_op):
    # The read op must address the same (container, shared_name) the writer
    # used; an unset shared_name defaults to the writer op's name.
    shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
    output_indices, output_values, output_shape = (
        gen_sparse_ops.take_many_sparse_from_tensors_map(
            sparse_handles,
            dtype=sparse_map_op.get_attr("T"),
            container=sparse_map_op.get_attr("container"),
            shared_name=shared_name,
            name=name))
  # Feed rank data back in, if available
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])
  return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
|
{
"content_hash": "b81d57e52a1935cf4737f677b3df1396",
"timestamp": "",
"source": "github",
"line_count": 2165,
"max_line_length": 80,
"avg_line_length": 37.558429561200924,
"alnum_prop": 0.6476621491993999,
"repo_name": "eaplatanios/tensorflow",
"id": "c580052c32c8b61467b857af3d237be41718c1a1",
"size": "82052",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/sparse_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "163987"
},
{
"name": "C++",
"bytes": "34944901"
},
{
"name": "CMake",
"bytes": "5123"
},
{
"name": "CSS",
"bytes": "9206"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "423531"
},
{
"name": "JavaScript",
"bytes": "3127"
},
{
"name": "Jupyter Notebook",
"bytes": "1833814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "19718973"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Scala",
"bytes": "3606806"
},
{
"name": "Shell",
"bytes": "352897"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
"""Custom template tags."""
|
{
"content_hash": "74f9fc99bfe75ec71f09985c63c0f7c7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.6428571428571429,
"repo_name": "Moliholy/cvmfs-browser",
"id": "926ec359ff7cdee4fdb0f63c27771beae21b410f",
"size": "28",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "cvmfs_browser/templatetags/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5223"
},
{
"name": "HTML",
"bytes": "14212"
},
{
"name": "JavaScript",
"bytes": "6952"
},
{
"name": "Python",
"bytes": "60177"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
def add_new_data_variant(apps, schema_editor):
    """Insert the 'Library data' DataVariant row (forward data migration)."""
    data_variant_model = apps.get_model('extrequests', 'DataVariant')
    data_variant_model.objects.create(
        name="Library data (e.g. MARC, XML, RDF, JSON)",
        unknown=False,
    )
class Migration(migrations.Migration):
    # Data-only migration: inserts one DataVariant row, no schema changes.
    dependencies = [
        ('extrequests', '0014_auto_20190907_1113'),
    ]
    operations = [
        # No reverse callable is supplied, so this migration is irreversible.
        migrations.RunPython(add_new_data_variant),
    ]
|
{
"content_hash": "a96bb9eff68961a3a435c03821174b03",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 25.5,
"alnum_prop": 0.6710239651416122,
"repo_name": "pbanaszkiewicz/amy",
"id": "a14833c8c1d208205b32ea323a26524e90c9a1fe",
"size": "508",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "amy/extrequests/migrations/0015_auto_20190907_1116.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
}
|
"""
Java
====
Installs Java, currently restricted to version 7.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.java
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
    """
    Install Java
    """
    # Thin wrapper: the blueprint convention exposes a `setup` task; the
    # actual work is done by install() so other code can reuse it directly.
    install()
def install():
    """Install the Java 7 JDK, via the webupd8team PPA on Ubuntu 12.04."""
    with sudo():
        release = debian.lbs_release()
        if release != '12.04':
            package = 'java7-jdk'
        else:
            # On 12.04 use Oracle's installer PPA and pre-accept the license
            # via debconf so the apt install runs non-interactively.
            debian.add_apt_ppa('webupd8team/java')
            debian.debconf_set_selections(
                'shared/accepted-oracle-license-v1-1 select true',
                'shared/accepted-oracle-license-v1-1 seen true')
            package = 'oracle-java7-installer'
        info('Install Java 7 JDK')
        debian.apt_get('install', package)
|
{
"content_hash": "7b5586a2722f648c2b4b0d4830da9369",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 92,
"avg_line_length": 19.565217391304348,
"alnum_prop": 0.5866666666666667,
"repo_name": "adisbladis/blues",
"id": "7eab32fdb760c885e47c9e883bc0943beb2a352d",
"size": "900",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "blues/java.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2300"
},
{
"name": "Nginx",
"bytes": "2892"
},
{
"name": "Python",
"bytes": "136720"
},
{
"name": "Shell",
"bytes": "2706"
}
],
"symlink_target": ""
}
|
"""
File: base64helper.py
--------------
This file defines a set of functions for encoding and decoding binary data
in the base64 format. See:
http://en.wikipedia.org/wiki/Base64
It is a lightweight wrapper around the builtin base64 library that ships with
Python.
"""
import base64 as _base64
def encode(b):
    """Return the standard-alphabet Base64 encoding of bytes-like object *b*.

    Accepts a byte string and returns the encoded byte string.
    """
    # b64encode with the default alphabet is exactly standard_b64encode.
    return _base64.b64encode(b)
def decode(b):
    """Return the bytes encoded in *b* using the standard Base64 alphabet.

    Accepts a byte string (or ASCII string) and returns the decoded bytes.
    """
    # b64decode with the default alphabet is exactly standard_b64decode.
    return _base64.b64decode(b)
__all__ = ['encode', 'decode']
if __name__ == '__main__':
    # Smoke test: round-trip a message through encode/decode and print both.
    print("**TESTING {}**".format(__file__))
    msg = b'Hello world!'
    encoded = encode(msg)
    decoded = decode(encoded)
    print("{} encodes into {}".format(msg, encoded))
    print("{} decodes into {}".format(encoded, decoded))
|
{
"content_hash": "1bef3677bba587af4c140600fee43526",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 114,
"avg_line_length": 29.243243243243242,
"alnum_prop": 0.6746765249537893,
"repo_name": "SarahPythonista/acmpy",
"id": "c05378fddfe20c3de99c08f566e2b1d142cc0dec",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/stylistic",
"path": "spgl/io/base64helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7737"
},
{
"name": "Python",
"bytes": "404025"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from binascii import unhexlify
import cStringIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
# Base value for every relative locktime below: 10 blocks, or 10 * 512s when
# the time-type flag is set (see BIP 68).
base_relative_locktime = 10
# nSequence bit masks: bit 31 disables the sequence lock and bit 22 switches
# it to time-based units (BIP 68); bits 25 and 18 have no consensus meaning
# and are set here to check that they are ignored.
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in xrange(2):
    b25times = []
    for b25 in xrange(2):
        b22times = []
        for b22 in xrange(2):
            b18times = []
            for b18 in xrange(2):
                # OR in each flag whose index bit is 1 for this combination.
                rlt = base_relative_locktime
                if (b31):
                    rlt = rlt | seq_disable_flag
                if (b25):
                    rlt = rlt | seq_random_high_bit
                if (b22):
                    rlt = rlt | seq_type_flag
                if (b18):
                    rlt = rlt | seq_random_low_bit
                b18times.append(rlt)
            b22times.append(b18times)
        b25times.append(b22times)
    relative_locktimes.append(b25times)
def all_rlt_txs(txarray):
    """Flatten a b31 x b25 x b22 x b18 nested tx array into a flat list.

    Order matches the nesting of relative_locktimes: b31 outermost,
    b18 innermost.
    """
    return [txarray[b31][b25][b22][b18]
            for b31 in xrange(2)
            for b25 in xrange(2)
            for b22 in xrange(2)
            for b18 in xrange(2)]
class BIP68_112_113Test(ComparisonTestFramework):
    """Drive one node through BIP9 'csv' activation and check BIP 68/112/113.

    See the module docstring for the exact block-height schedule and the
    numbering of the yielded TestInstances.
    """
    def __init__(self):
        # A single node is sufficient; the comparison framework mirrors it.
        self.num_nodes = 1
    def setup_network(self):
        # Must set the blockversion for this test
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        # Standard comptool harness: get_tests() below yields the instances.
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def send_generic_input_tx(self, node, coinbases):
        """Spend the next coinbase from `coinbases` (pops it) to our address; returns the txid."""
        amount = Decimal("49.99")
        return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
    def create_transaction(self, node, txid, to_address, amount):
        """Build an unsigned CTransaction spending output 0 of `txid` to `to_address`."""
        inputs = [{ "txid" : txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(rawtx))
        tx.deserialize(f)
        return tx
    def sign_transaction(self, node, unsignedtx):
        """Sign `unsignedtx` with the node's wallet and return it as a CTransaction."""
        rawtx = ToHex(unsignedtx)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx
    def generate_blocks(self, number, version, test_blocks = []):
        """Append `number` empty blocks of `version` to test_blocks, advancing tip/height/time.

        NOTE(review): mutable default argument -- the default list is shared
        across calls, so successive calls that omit test_blocks keep appending
        to (and re-yield) the same growing list. Appears harmless only if the
        comptool framework tolerates re-sent, already-accepted blocks; confirm
        before reusing this helper elsewhere.
        """
        for i in xrange(number):
            block = self.create_test_block([], version)
            test_blocks.append([block, True])
            self.last_block_time += 600
            self.tip = block.sha256
            self.tipheight += 1
        return test_blocks
    def create_test_block(self, txs, version = 536870912):
        """Build and solve a block on the current tip containing `txs` (default version 0x20000000)."""
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = version
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block
    def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
        """Return a 2x2x2x2 array of signed txs, one per relative_locktimes entry, set in nSequence."""
        txs = []
        assert(len(bip68inputs) >= 16)
        i = 0
        for b31 in xrange(2):
            b25txs = []
            for b25 in xrange(2):
                b22txs = []
                for b22 in xrange(2):
                    b18txs = []
                    for b18 in xrange(2):
                        tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        tx.nVersion = txversion
                        tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        b18txs.append(self.sign_transaction(self.nodes[0], tx))
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
    def create_bip112special(self, input, txversion):
        """Return a signed tx whose scriptSig is prefixed with `-1 OP_CSV OP_DROP` (negative CSV arg)."""
        tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
        tx.nVersion = txversion
        signtx = self.sign_transaction(self.nodes[0], tx)
        signtx.vin[0].scriptSig = CScript([-1, OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
        return signtx
    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
        """Return a 2x2x2x2 array of signed txs with `<locktime> OP_CSV OP_DROP` prepended.

        If varyOP_CSV, nSequence is fixed at base_relative_locktime and the
        OP_CSV argument varies; otherwise the roles are swapped.
        """
        txs = []
        assert(len(bip112inputs) >= 16)
        i = 0
        for b31 in xrange(2):
            b25txs = []
            for b25 in xrange(2):
                b22txs = []
                for b22 in xrange(2):
                    b18txs = []
                    for b18 in xrange(2):
                        tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
                            tx.vin[0].nSequence = base_relative_locktime + locktime_delta
                        else: # vary nSequence instead, OP_CSV is fixed
                            tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        tx.nVersion = txversion
                        signtx = self.sign_transaction(self.nodes[0], tx)
                        if (varyOP_CSV):
                            signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        else:
                            signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        b18txs.append(signtx)
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
    def get_tests(self):
        """Yield the numbered TestInstances described in the module docstring."""
        long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
        self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = 82 # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
        test_blocks = self.generate_blocks(61, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 1
        # Advanced from DEFINED to STARTED, height = 143
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False) # 2
        # Failed to advance past STARTED, height = 287
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
        # 108 out of 144 signal bit 0 to achieve lock-in
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False) # 3
        # Advanced from STARTED to LOCKED_IN, height = 431
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
        # 140 more version 4 blocks
        test_blocks = self.generate_blocks(140, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 4
        ### Inputs at height = 572
        # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for i in xrange(16):
            bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for j in xrange(2):
            inputs = []
            for i in xrange(16):
                inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
            bip112basicinputs.append(inputs)
        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for j in xrange(2):
            inputs = []
            for i in xrange(16):
                inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
            bip112diverseinputs.append(inputs)
        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
        # 1 normal input
        bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
        self.nodes[0].setmocktime(0)
        self.tip = int("0x" + inputblockhash + "L", 0)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 5
        # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2
        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
        bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
        bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
        bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
        bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
        bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
        bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
        ### TESTING ###
        ##################################
        ### Before Soft Forks Activate ###
        ##################################
        # All txs should pass
        ### Version 1 txs ###
        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        success_txs.append(bip113signed1)
        success_txs.append(bip112tx_special_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Version 2 txs ###
        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        success_txs.append(bip113signed2)
        success_txs.append(bip112tx_special_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
        test_blocks = self.generate_blocks(1, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 8
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
        #################################
        ### After Soft Forks Activate ###
        #################################
        ### BIP 113 ###
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Next block height = 580 after 4 blocks of random version
        test_blocks = self.generate_blocks(4, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 13
        ### BIP 68 ###
        ### Version 1 txs ###
        # All still pass
        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Version 2 txs ###
        bip68success_txs = []
        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
        for tx in bip68timetxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
        bip68heighttxs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
        for tx in bip68heighttxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
        # Advance one block to 581
        test_blocks = self.generate_blocks(1, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 24
        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
        # Advance one block to 582
        test_blocks = self.generate_blocks(1, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 30
        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### BIP 112 ###
        ### Version 1 txs ###
        # -1 OP_CSV tx should fail
        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
        success_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
                    success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = []
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
                    fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
        ### Version 2 txs ###
        # -1 OP_CSV tx should fail
        yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
                    success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = []
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
        # If sequencelock types mismatch, tx should fail
        fail_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
                fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
        # Remaining txs should pass, just test masking works properly
        success_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
                success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Additional test, of checking that comparison of two time types works properly
        time_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
                tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
                signtx = self.sign_transaction(self.nodes[0], tx)
                time_txs.append(signtx)
        yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Missing aspects of test
        ## Testing empty stack fails
if __name__ == '__main__':
    # Entry point: run the comptool-driven comparison test.
    BIP68_112_113Test().main()
|
{
"content_hash": "85f4dc24dd51c6d305a66af36416846e",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 172,
"avg_line_length": 50.78838951310861,
"alnum_prop": 0.6240920320047196,
"repo_name": "SartoNess/BitcoinUnlimited",
"id": "93746f78afd4622b8229b935e6469d13cc6f4df9",
"size": "27396",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.12.1bu",
"path": "qa/rpc-tests/bip68-112-113-p2p.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "647624"
},
{
"name": "C++",
"bytes": "4606262"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3821"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156035"
},
{
"name": "Makefile",
"bytes": "96732"
},
{
"name": "Objective-C",
"bytes": "5375"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "687117"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "34356"
},
{
"name": "TypeScript",
"bytes": "3894179"
}
],
"symlink_target": ""
}
|
import inspect
from p1tr.helpers import clean_string, pretty_list
from p1tr.plugin import *
@meta_plugin
class Help(Plugin):
    """Provides help for all plugins by accessing their docstrings."""

    @command
    def help(self, server, channel, nick, params):
        """
        Usage: help PLUGIN [COMMAND] - prints a help message. If only PLUGIN is
        specified, you get general plugin information and a list of available
        commands. Otherwise, the help for the specific COMMAND of the PLUGIN is
        provided. If PLUGIN can't be found, I will look for a command with that
        name.
        """
        # Without arguments, show this command's own usage text.
        if not params:
            return clean_string(self.help.__doc__)
        first = params[0]
        if len(params) < 2:
            if first in self.bot.plugins:
                # Plugin lookup: docstring plus an alphabetical command list.
                plugin = self.bot.plugins[first]
                text = clean_string(plugin.__doc__ or \
                        'Sorry, no help message available.')
                names = [name for name, member in inspect.getmembers(plugin)
                         if hasattr(member, '__annotations__') \
                         and 'command' in member.__annotations__]
                if names:
                    text += ' Commands: %s' % pretty_list(sorted(names))
                return clean_string(text)
            if first in self.bot.commands:
                # Fall back to treating the single argument as a command name.
                handler = getattr(self.bot.commands[first], first)
                return clean_string(handler.__doc__ or \
                        'Sorry, no help message available.')
            return 'Plugin or command "%s" not found.' % first
        # Two arguments: PLUGIN COMMAND. Verify the command exists and is
        # actually provided by the named plugin before showing its docstring.
        second = params[1]
        if second in self.bot.commands and \
                self.bot.commands[second].__class__.__name__.lower() == first:
            handler = getattr(self.bot.plugins[first], second)
            return clean_string(handler.__doc__ or \
                    'Sorry, no help message available.')
        return 'Command "%s" from plugin "%s" not found.' % (second, first)

    @command
    def list_commands(self, server, channel, nick, params):
        """Lists all available commands."""
        return pretty_list(self.bot.commands.keys())

    @command
    def list_plugins(self, server, channel, nick, params):
        """
        Lists all active plugins. Plugins on the global- or server-wide
        blacklist are not shown.
        """
        return pretty_list(self.bot.plugins.keys())
|
{
"content_hash": "4f280b91056b3f167e341f594a25bf57",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 44.35,
"alnum_prop": 0.5505449079293498,
"repo_name": "howard/p1tr-tng",
"id": "39ec70071a1616395ce4f0bd1f7cb5083b31612a",
"size": "2661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/help/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Elixir",
"bytes": "3175"
},
{
"name": "Python",
"bytes": "117228"
},
{
"name": "Shell",
"bytes": "2683"
}
],
"symlink_target": ""
}
|
from setuptools import setup


def _read(path):
    """Return the text of *path*, closing the file handle deterministically."""
    # BUG FIX: the original used open(path).read(), which leaks the file
    # handle until garbage collection; a context manager closes it promptly.
    with open(path) as fh:
        return fh.read()

# version_tuple = __import__('xadmin.version').VERSION
# version = ".".join([str(v) for v in version_tuple])

setup(
    name='django-xadmin',
    version='0.4.2',
    description='Drop-in replacement of Django admin comes with lots of goodies, fully extensible with plugin support, pretty UI based on Twitter Bootstrap.',
    long_description=_read('README.rst'),
    author='sshwsfc',
    author_email='sshwsfc@gmail.com',
    license=_read('LICENSE'),
    url='http://www.xadmin.io',
    download_url='http://github.com/sshwsfc/django-xadmin/archive/master.zip',
    packages=['xadmin', 'xadmin.plugins', 'xadmin.templatetags', 'xadmin.views'],
    include_package_data=True,
    install_requires=[
        'setuptools',
        'django>=1.5',
        'django-crispy-forms>=1.4.0',
    ],
    extras_require={
        'Excel': ['xlwt', 'xlsxwriter'],
        'Reversion': ['django-reversion'],
    },
    zip_safe=False,
    keywords=['admin', 'django', 'xadmin', 'bootstrap'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        "Programming Language :: JavaScript",
        'Programming Language :: Python',
        "Programming Language :: Python :: 2.7",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
)
|
{
"content_hash": "c55285d30b5f898535bcd22ef4deb002",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 158,
"avg_line_length": 37.51162790697674,
"alnum_prop": 0.6150030998140111,
"repo_name": "jneight/django-xadmin",
"id": "aa862e3c49ede49cb7032161d15b7a9578b1cef2",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/batch-fix",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23473"
},
{
"name": "HTML",
"bytes": "94136"
},
{
"name": "JavaScript",
"bytes": "63926"
},
{
"name": "Python",
"bytes": "397199"
}
],
"symlink_target": ""
}
|
'''A setuptools script to install strip_recipes
'''
from setuptools import setup

# Package metadata is gathered into a single mapping and then unpacked into
# setup(), keeping the call site minimal.
_METADATA = dict(
    name='strip_recipes',
    version='1.0.0',
    description='Create recipes for the LSPE/Strip tester software',
    long_description='''
Python library to easily create complex recipes to be used with the LSPE/Strip
tester software. All the Python control structures (if, while, for) can be used.
(LSPE is a balloon/ground experiment to search for the B-mode signal in the
polarization pattern of the Cosmic Microwave Background. Strip is the low-frequency
polarimetric instrument that will measure the sky from the ground.)''',
    author='Maurizio Tomasi',
    author_email='maurizio.tomasi@unimiREMOVETHIS.it',
    url='https://github.com/ziotom78/strip_recipes',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Topic :: Scientific/Engineering :: Physics',
        'Topic :: Software Development :: Code Generators'
    ],
    keywords='cosmology laboratory',
    test_suite='nose.collector',
    tests_require=['nose'],
)

setup(**_METADATA)
|
{
"content_hash": "91d947fee9b3c8d1684964ecd653315a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 83,
"avg_line_length": 38.86666666666667,
"alnum_prop": 0.7015437392795884,
"repo_name": "lspestrip/strip_recipes",
"id": "3f9d875763e29ee9e3b9a97d467a246be05efa5e",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11898"
}
],
"symlink_target": ""
}
|
import pytest
import re
from .module import SnapshotModule, SnapshotTest
from .diff import PrettyDiff
from .reporting import reporting_lines, diff_report
def pytest_addoption(parser):
    """Register snapshottest's command-line flags on pytest's option parser."""
    opts = parser.getgroup("snapshottest")
    # --snapshot-update rewrites stored snapshots instead of comparing.
    opts.addoption(
        "--snapshot-update",
        action="store_true",
        dest="snapshot_update",
        default=False,
        help="Update the snapshots.",
    )
    # --snapshot-verbose enables extra progress/diagnostic output.
    opts.addoption(
        "--snapshot-verbose",
        default=False,
        action="store_true",
        help="Dump diagnostic and progress information.",
    )
class PyTestSnapshotTest(SnapshotTest):
    """SnapshotTest flavour that draws its context from a pytest request."""

    def __init__(self, request=None):
        self.request = request
        super(PyTestSnapshotTest, self).__init__()

    @property
    def module(self):
        # Snapshot module backing the test file currently being executed.
        testpath = self.request.node.fspath.strpath
        return SnapshotModule.get_module_for_testpath(testpath)

    @property
    def update(self):
        # True when pytest was invoked with --snapshot-update.
        return self.request.config.option.snapshot_update

    @property
    def test_name(self):
        # Build "<Class.>test name <counter>", collapsing whitespace so the
        # key stays on one line in the snapshot file.
        cls = getattr(self.request.node.cls, "__name__", "")
        node_name = self.request.node.name.replace(r"\n", " ")
        flattened = re.sub(r"\s+", " ", node_name)
        prefix = "{}.".format(cls) if cls else ""
        return "{}{} {}".format(prefix, flattened, self.curr_snapshot)
class SnapshotSession(object):
    """Holds per-run snapshot options and renders the terminal summary."""

    def __init__(self, config):
        self.config = config
        self.verbose = config.getoption("snapshot_verbose")

    def display(self, tr):
        """Write the snapshot summary section, if any snapshots were seen."""
        if not SnapshotModule.has_snapshots():
            return
        tr.write_sep("=", "SnapshotTest summary")
        for report_line in reporting_lines("pytest"):
            tr.write_line(report_line)
def pytest_assertrepr_compare(op, left, right):
    """Supply a snapshot-aware explanation when an `==` assertion fails."""
    if op == "==" and isinstance(left, PrettyDiff):
        return diff_report(left, right)
    return None
@pytest.fixture
def snapshot(request):
    # Function-scoped fixture: each test receives its own snapshot context;
    # the context manager's __exit__ finalizes recorded snapshots afterwards.
    with PyTestSnapshotTest(request) as snapshot_test:
        yield snapshot_test
def pytest_terminal_summary(terminalreporter):
    """Persist snapshot changes (when updating) and print the summary."""
    config = terminalreporter.config
    if config.option.snapshot_update:
        # Drop snapshots whose tests no longer exist, then write to disk.
        for module in SnapshotModule.get_modules():
            module.delete_unvisited()
            module.save()
    config._snapshotsession.display(terminalreporter)
# force the other plugins to initialise first
# (fixes issue with capture not being properly initialised)
@pytest.mark.trylast
def pytest_configure(config):
    # Stash the session on the config object so other hooks can reach it
    # (see pytest_terminal_summary above).
    config._snapshotsession = SnapshotSession(config)
    # config.pluginmanager.register(bs, "snapshottest")
|
{
"content_hash": "3b0f8f38d40f9b9a7d9b2bfdca56a30a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 87,
"avg_line_length": 27.717391304347824,
"alnum_prop": 0.6450980392156863,
"repo_name": "syrusakbary/snapshottest",
"id": "b820e1a0620928d903b4094aeb4f9f1d6c9d592b",
"size": "2550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snapshottest/pytest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "516"
},
{
"name": "Python",
"bytes": "41711"
}
],
"symlink_target": ""
}
|
"""
ZSNES Save State Parser (v143 only currently)
Author: Jason Gorski
Creation date: 2006-09-15
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, StaticFieldSet,
UInt8, UInt16, UInt32,
String, PaddingBytes, Bytes, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
class ZSTHeader(StaticFieldSet):
    # Leading fields of a ZSNES save state: magic string, version byte,
    # and scanline/cycle timing state captured at save time.
    format = (
        (String, "zs_mesg", 26, "File header", {"charset": "ASCII"}),
        (UInt8, "zs_mesglen", "File header string len"),
        (UInt8, "zs_version", "Version minor #"),
        (UInt8, "curcyc", "cycles left in scanline"),
        (UInt16, "curypos", "current y position"),
        (UInt8, "cacheud", "update cache every ? frames"),
        (UInt8, "ccud", "current cache increment"),
        (UInt8, "intrset", "interrupt set"),
        (UInt8, "cycpl", "cycles per scanline"),
        (UInt8, "cycphb", "cycles per hblank"),
        (UInt8, "spcon", "SPC Enable (1=enabled)"),
        (UInt16, "stackand", "value to and stack to keep it from going to the wrong area"),
        (UInt16, "stackor", "value to or stack to keep it from going to the wrong area"),
    )
class ZSTcpu(StaticFieldSet):
    """Saved 65816 CPU state: registers, stack, flags, PC and bank bookkeeping."""
    format = (
        (UInt16, "xat"),
        (UInt8, "xdbt"),
        (UInt8, "xpbt"),
        (UInt16, "xst"),
        (UInt16, "xdt"),
        (UInt16, "xxt"),
        (UInt16, "xyt"),
        (UInt8, "xp"),
        (UInt8, "xe"),
        (UInt16, "xpc"),
        (UInt8, "xirqb", "which bank the irqs start at"),
        (UInt8, "debugger", "Start with debugger (1: yes, 0: no)"),
        # BUG FIX: a missing comma concatenated the two string literals,
        # turning the field name into "CurtableCurrent table address" and
        # leaving the field without a description.
        (UInt32, "Curtable", "Current table address"),
        (UInt8, "curnmi", "if in NMI (1=yes)"),
        (UInt32, "cycpbl", "percentage left of CPU/SPC to run (3.58 = 175)"),
        (UInt32, "cycpblt", "percentage of CPU/SPC to run"),
    )
class ZSTppu(FieldSet):
    """Saved PPU (picture processing unit) state of a ZSNES v143 save state."""
    # Fixed-size record: the trailing PaddingBytes pads out to exactly 3019 bytes.
    static_size = 3019*8

    def createFields(self):
        """Yield the PPU fields in on-disk order."""
        yield UInt8(self, "sndrot", "rotates to use A,X or Y for sound skip")
        yield UInt8(self, "sndrot2", "rotates a random value for sound skip")
        yield UInt8(self, "INTEnab", "enables NMI(7)/VIRQ(5)/HIRQ(4)/JOY(0)")
        yield UInt8(self, "NMIEnab", "controlled in e65816 loop. Sets to 81h")
        yield UInt16(self, "VIRQLoc", "VIRQ Y location")
        yield UInt8(self, "vidbright", "screen brightness 0..15")
        yield UInt8(self, "previdbr", "previous screen brightness")
        yield UInt8(self, "forceblnk", "force blanking on/off ($80=on)")
        yield UInt32(self, "objptr", "pointer to object data in VRAM")
        yield UInt32(self, "objptrn", "pointer2 to object data in VRAM")
        yield UInt8(self, "objsize1", "1=8dot, 4=16dot, 16=32dot, 64=64dot")
        yield UInt8(self, "objsize2", "large object size")
        yield UInt8(self, "objmovs1", "number of bytes to move/paragraph")
        yield UInt16(self, "objadds1", "number of bytes to add/paragraph")
        yield UInt8(self, "objmovs2", "number of bytes to move/paragraph")
        yield UInt16(self, "objadds2", "number of bytes to add/paragraph")
        yield UInt16(self, "oamaddrt", "oam address")
        yield UInt16(self, "oamaddrs", "oam address at beginning of vblank")
        yield UInt8(self, "objhipr", "highest priority object #")
        yield UInt8(self, "bgmode", "graphics mode 0..7")
        yield UInt8(self, "bg3highst", "is 1 if background 3 has the highest priority")
        yield UInt8(self, "bgtilesz", "0=8x8, 1=16x16 bit0=bg1, bit1=bg2, etc.")
        yield UInt8(self, "mosaicon", "mosaic on, bit 0=bg1, bit1=bg2, etc.")
        yield UInt8(self, "mosaicsz", "mosaic size in pixels")
        yield UInt16(self, "bg1ptr", "pointer to background1")
        yield UInt16(self, "bg2ptr", "pointer to background2")
        yield UInt16(self, "bg3ptr", "pointer to background3")
        yield UInt16(self, "bg4ptr", "pointer to background4")
        yield UInt16(self, "bg1ptrb", "pointer to background1")
        yield UInt16(self, "bg2ptrb", "pointer to background2")
        yield UInt16(self, "bg3ptrb", "pointer to background3")
        yield UInt16(self, "bg4ptrb", "pointer to background4")
        yield UInt16(self, "bg1ptrc", "pointer to background1")
        yield UInt16(self, "bg2ptrc", "pointer to background2")
        yield UInt16(self, "bg3ptrc", "pointer to background3")
        yield UInt16(self, "bg4ptrc", "pointer to background4")
        yield UInt16(self, "bg1ptrd", "pointer to background1")
        yield UInt16(self, "bg2ptrd", "pointer to background2")
        yield UInt16(self, "bg3ptrd", "pointer to background3")
        yield UInt16(self, "bg4ptrd", "pointer to background4")
        yield UInt8(self, "bg1scsize", "bg #1 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg2scsize", "bg #2 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg3scsize", "bg #3 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg4scsize", "bg #4 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt16(self, "bg1objptr", "pointer to tiles in background1")
        yield UInt16(self, "bg2objptr", "pointer to tiles in background2")
        yield UInt16(self, "bg3objptr", "pointer to tiles in background3")
        yield UInt16(self, "bg4objptr", "pointer to tiles in background4")
        yield UInt16(self, "bg1scrolx", "background 1 x position")
        yield UInt16(self, "bg2scrolx", "background 2 x position")
        yield UInt16(self, "bg3scrolx", "background 3 x position")
        yield UInt16(self, "bg4scrolx", "background 4 x position")
        yield UInt16(self, "bg1sx", "Temporary Variable for Debugging purposes")
        yield UInt16(self, "bg1scroly", "background 1 y position")
        yield UInt16(self, "bg2scroly", "background 2 y position")
        yield UInt16(self, "bg3scroly", "background 3 y position")
        yield UInt16(self, "bg4scroly", "background 4 y position")
        yield UInt16(self, "addrincr", "vram increment (2,64,128,256)")
        yield UInt8(self, "vramincr", "0 = increment at 2118/2138, 1 = 2119,213A")
        yield UInt8(self, "vramread", "0 = address set, 1 = already read once")
        yield UInt32(self, "vramaddr", "vram address")
        yield UInt16(self, "cgaddr", "cg (palette)")
        yield UInt8(self, "cgmod", "if cgram is modified or not")
        yield UInt16(self, "scrnon", "main & sub screen on")
        yield UInt8(self, "scrndist", "which background is disabled")
        yield UInt16(self, "resolutn", "screen resolution")
        yield UInt8(self, "multa", "multiplier A")
        yield UInt16(self, "diva", "divisor C")
        yield UInt16(self, "divres", "quotent of divc/divb")
        yield UInt16(self, "multres", "result of multa * multb/remainder of divc/divb")
        yield UInt16(self, "latchx", "latched x value")
        yield UInt16(self, "latchy", "latched y value")
        yield UInt8(self, "latchxr", "low or high byte read for x value")
        yield UInt8(self, "latchyr", "low or high byte read for y value")
        yield UInt8(self, "frskipper", "used to control frame skipping")
        yield UInt8(self, "winl1", "window 1 left position")
        yield UInt8(self, "winr1", "window 1 right position")
        yield UInt8(self, "winl2", "window 2 left position")
        yield UInt8(self, "winr2", "window 2 right position")
        yield UInt8(self, "winbg1en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG1")
        yield UInt8(self, "winbg2en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG2")
        yield UInt8(self, "winbg3en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG3")
        yield UInt8(self, "winbg4en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG4")
        yield UInt8(self, "winobjen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on sprites")
        yield UInt8(self, "wincolen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on backarea")
        yield UInt8(self, "winlogica", "Window logic type for BG1 to 4")
        yield UInt8(self, "winlogicb", "Window logic type for Sprites and Backarea")
        yield UInt8(self, "winenabm", "Window logic enable for main screen")
        yield UInt8(self, "winenabs", "Window logic enable for sub sceen")
        yield UInt8(self, "mode7set", "mode 7 settings")
        yield UInt16(self, "mode7A", "A value for Mode 7")
        yield UInt16(self, "mode7B", "B value for Mode 7")
        yield UInt16(self, "mode7C", "C value for Mode 7")
        yield UInt16(self, "mode7D", "D value for Mode 7")
        yield UInt16(self, "mode7X0", "Center X for Mode 7")
        yield UInt16(self, "mode7Y0", "Center Y for Mode 7")
        yield UInt8(self, "JoyAPos", "Old-Style Joystick Read Position for Joy 1 & 3")
        yield UInt8(self, "JoyBPos", "Old-Style Joystick Read Position for Joy 2 & 4")
        yield UInt32(self, "compmult", "Complement Multiplication for Mode 7")
        yield UInt8(self, "joyalt", "temporary joystick alternation")
        yield UInt32(self, "wramrwadr", "continuous read/write to wram address")
        yield RawBytes(self, "dmadata", 129, "dma data (written from ports 43xx)")
        yield UInt8(self, "irqon", "if IRQ has been called (80h) or not (0)")
        yield UInt8(self, "nexthdma", "HDMA data to execute once vblank ends")
        yield UInt8(self, "curhdma", "Currently executed hdma")
        yield RawBytes(self, "hdmadata", 152, "4 dword register addresses, # bytes to transfer/line, address increment (word)")
        yield UInt8(self, "hdmatype", "if first time executing hdma or not")
        yield UInt8(self, "coladdr", "red value of color to add")
        yield UInt8(self, "coladdg", "green value of color to add")
        yield UInt8(self, "coladdb", "blue value of color to add")
        yield UInt8(self, "colnull", "keep this 0 (when accessing colors by dword)")
        yield UInt8(self, "scaddset", "screen/fixed color addition settings")
        yield UInt8(self, "scaddtype", "which screen to add/sub")
        yield UInt8(self, "Voice0Disabl2", "Disable Voice 0")
        yield UInt8(self, "Voice1Disabl2", "Disable Voice 1")
        yield UInt8(self, "Voice2Disabl2", "Disable Voice 2")
        yield UInt8(self, "Voice3Disabl2", "Disable Voice 3")
        yield UInt8(self, "Voice4Disabl2", "Disable Voice 4")
        yield UInt8(self, "Voice5Disabl2", "Disable Voice 5")
        yield UInt8(self, "Voice6Disabl2", "Disable Voice 6")
        yield UInt8(self, "Voice7Disabl2", "Disable Voice 7")
        yield RawBytes(self, "oamram", 1024, "OAMRAM (544 bytes)")
        yield RawBytes(self, "cgram", 512, "CGRAM")
        yield RawBytes(self, "pcgram", 512, "Previous CGRAM")
        yield UInt8(self, "vraminctype")
        yield UInt8(self, "vramincby8on", "if increment by 8 is on")
        yield UInt8(self, "vramincby8left", "how many left")
        yield UInt8(self, "vramincby8totl", "how many in total (32,64,128)")
        yield UInt8(self, "vramincby8rowl", "how many left in that row (start at 8)")
        yield UInt16(self, "vramincby8ptri", "increment by how many when rowl = 0")
        yield UInt8(self, "nexthprior")
        yield UInt8(self, "doirqnext")
        yield UInt16(self, "vramincby8var")
        yield UInt8(self, "screstype")
        yield UInt8(self, "extlatch")
        yield UInt8(self, "cfield")
        yield UInt8(self, "interlval")
        # BUG FIX: a missing comma fused the name and description into a
        # single string, creating a field literally named "HIRQLoc HIRQ X".
        yield UInt16(self, "HIRQLoc", "HIRQ X")
        # NEWer ZST format
        yield UInt8(self, "KeyOnStA")
        yield UInt8(self, "KeyOnStB")
        yield UInt8(self, "SDD1BankA")
        yield UInt8(self, "SDD1BankB")
        yield UInt8(self, "SDD1BankC")
        yield UInt8(self, "SDD1BankD")
        yield UInt8(self, "vramread2")
        yield UInt8(self, "nosprincr")
        yield UInt16(self, "poamaddrs")
        yield UInt8(self, "ioportval")
        yield UInt8(self, "iohvlatch")
        yield UInt8(self, "ppustatus")
        yield PaddingBytes(self, "tempdat", 477, "Reserved/Unused")
class ZSNESFile(Parser):
    """ZSNES emulator save state ("ZST") parser — supports format v143 only."""
    PARSER_TAGS = {
        "id": "zsnes",
        "category": "game",
        "description": "ZSNES Save State File (only version 143)",
        "min_size": 3091*8,
        "file_ext": ("zst", "zs1", "zs2", "zs3", "zs4", "zs5", "zs6",
                     "zs7", "zs8", "zs9")
    }
    endian = LITTLE_ENDIAN

    def validate(self):
        """Accept only files with the v143 magic string and version byte."""
        temp = self.stream.readBytes(0, 28)
        if temp[0:26] != "ZSNES Save State File V143":
            return "Wrong header"
        version = ord(temp[27:28])
        if version != 143:
            # BUG FIX: the message previously formatted "%d" with temp[27:1],
            # an empty string slice, which raised a TypeError on this path.
            return "Wrong save version %d <> 143" % version
        return True

    def seek(self, offset):
        """Yield padding (if any) needed to reach absolute byte *offset*."""
        padding = self.seekByte(offset, relative=False)
        if padding is not None:
            yield padding

    def createFields(self):
        yield ZSTHeader(self, "header", "ZST header")   # Offset: 0
        yield ZSTcpu(self, "cpu", "ZST cpu registers")  # 41
        # BUG FIX: the PPU block was mislabelled "ZST CPU registers".
        yield ZSTppu(self, "ppu", "ZST PPU registers")  # 72
        yield RawBytes(self, "wram7E", 65536)           # 3091
        yield RawBytes(self, "wram7F", 65536)           # 68627
        yield RawBytes(self, "vram", 65536)             # 134163
        # TODO: Interpret extra on-cart chip data found at/beyond... 199699
        # TODO: Interpret Thumbnail/Screenshot data found at 275291
        # 64*56*2(16bit colors) = 7168
        padding = self.seekByte(275291, relative=False)
        if padding is not None:
            yield padding
        yield Bytes(self, "thumbnail", 7168, "Thumbnail of playing game in some sort of raw 64x56x16-bit RGB mode?")
|
{
"content_hash": "1ec449554da31bf4d9a5cc35f4190199",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 127,
"avg_line_length": 53.7,
"alnum_prop": 0.6216759776536313,
"repo_name": "Yukinoshita47/Yuki-Chan-The-Auto-Pentest",
"id": "a8f7550614f12b584ec1418284355e7e3b5bf7c7",
"size": "13425",
"binary": false,
"copies": "94",
"ref": "refs/heads/master",
"path": "Module/metagoofil/hachoir_parser/game/zsnes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36211"
},
{
"name": "JavaScript",
"bytes": "3038"
},
{
"name": "Makefile",
"bytes": "1360"
},
{
"name": "Perl",
"bytes": "108876"
},
{
"name": "Python",
"bytes": "3034585"
},
{
"name": "Roff",
"bytes": "6738"
},
{
"name": "Ruby",
"bytes": "2693582"
},
{
"name": "Shell",
"bytes": "53755"
},
{
"name": "XSLT",
"bytes": "5475"
}
],
"symlink_target": ""
}
|
def extractMoriitranslatesWordpressCom(item):
    '''
    Parser for 'moriitranslates.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Ignore items with no chapter/volume information and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # Map a feed tag to its (release name, translation type).
    tag_to_release = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (name, tl_type) in tag_to_release.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
|
{
"content_hash": "f5dcbcef00932aa704c10fb64158dc9f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.095238095238095,
"alnum_prop": 0.6414762741652021,
"repo_name": "fake-name/ReadableWebProxy",
"id": "42452c381d3afc9894605c5b56535a66ddd19268",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractMoriitranslatesWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from pyramid.config import Configurator
from pyramid.renderers import JSON
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.add_renderer('json', JSON(indent=4))
config.add_static_view('static', 'hexserver:static', cache_max_age=3600)
config.add_route('landing', '/')
config.add_route('show_current_spell', 'spells/current')
config.add_route('show_spell', 'spells/{cast_time}')
config.add_route('api_setup', '/api/setup')
#config.add_route('api_backup', '/api/backup')
config.add_route('api_create_user', '/api/users', request_method="POST")
config.add_route('api_users', '/api/users')
config.add_route('api_user_authenticate', 'api/users/authenticate', request_method="POST")
config.add_route('api_user_show', '/api/users/{name}')
config.add_route('api_create_spell', '/api/spells', request_method="POST")
config.add_route('api_confirm_spell_cast', '/api/spells/{cast_time}/complete')
config.add_route('api_spells', '/api/spells')
config.add_route('test', '/test')
config.scan()
return config.make_wsgi_app()
|
{
"content_hash": "f4fde2537e48f78dc3b2c622b063a009",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 94,
"avg_line_length": 41,
"alnum_prop": 0.6778805719091674,
"repo_name": "cproctor/hex",
"id": "0e3ca36ce2e8245ed026d20c76f4ba4128528d4e",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/hexserver/hexserver/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "6421"
},
{
"name": "CSS",
"bytes": "168732"
},
{
"name": "JavaScript",
"bytes": "117197"
},
{
"name": "Python",
"bytes": "48917"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # Data migration: renames some existing EventTheme rows, inserts new
    # themes, and assigns an explicit display order to every theme.
    # NOTE: migration files are frozen history — do not edit the logic or
    # the `models` dict below.

    def forwards(self, orm):
        """
        Update default topics list
        """
        # (old name, new name) pairs for themes that already exist.
        update_theme_title_list = [
            ("Native mobile app development", "Mobile app development"),
            ("Other software development", "Software development"),
            ("Other (see description)", "Other")
        ]
        # Themes to create from scratch.
        new_theme_list = [
            "Unplugged activities",
            "Playful coding activities",
            "Art and creativity",
            "Visual/Block programming",
            "Game design",
            "Internet of things and wearable computing",
            "3D printing",
            "Augmented reality",
            "Artificial intelligence",
            "Motivation and awareness raising",
            "Promoting diversity",
        ]
        # (name, display order) for the final, complete theme set.
        theme_order_list = [
            ("Hardware", 0),
            ("Robotics", 1),
            ("Data manipulation and visualisation", 2),
            ("Mobile app development", 3),
            ("Web development", 4),
            ("Basic programming concepts", 5),
            ("Unplugged activities", 6),
            ("Playful coding activities", 7),
            ("Art and creativity", 8),
            ("Visual/Block programming", 9),
            ("Software development", 10),
            ("Game design", 11),
            ("Internet of things and wearable computing", 12),
            ("3D printing", 13),
            ("Augmented reality", 14),
            ("Artificial intelligence", 15),
            ("Motivation and awareness raising", 16),
            ("Promoting diversity", 17),
            ("Other", 18),
        ]
        # Rename pass — .get() intentionally raises if a theme is missing.
        for theme in update_theme_title_list:
            # Throw exception, if theme name does not exists
            existing_theme = orm['api.EventTheme'].objects.get(name=theme[0])
            existing_theme.name = theme[1]
            existing_theme.save()
        # Insert pass for brand-new themes.
        for theme in new_theme_list:
            new_theme = orm['api.EventTheme'].objects.create(name=theme)
            new_theme.save()
        # Ordering pass — runs after renames/inserts so every name resolves.
        for theme in theme_order_list:
            # Throw exception, if theme name does not exists
            existing_theme = orm['api.EventTheme'].objects.get(name=theme[0])
            existing_theme.order = theme[1]
            existing_theme.save()

    def backwards(self, orm):
        # Irreversible by design: the original names/orders are not restored.
        pass

    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'api.event': {
            'Meta': {'ordering': "['start_date']", 'object_name': 'Event'},
            'audience': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'event_audience'", 'symmetrical': 'False', 'to': "orm['api.EventAudience']"}),
            'contact_person': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            'event_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'geoposition': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'organizer': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 9, 8, 0, 0)'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
            'theme': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'event_theme'", 'symmetrical': 'False', 'to': "orm['api.EventTheme']"}),
            'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'api.eventaudience': {
            'Meta': {'object_name': 'EventAudience'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'api.eventtheme': {
            'Meta': {'ordering': "['order', 'name']", 'object_name': 'EventTheme'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'api.socialaccountlist': {
            'Meta': {'object_name': 'SocialAccountList'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'api.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'bio': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['api']
    symmetrical = True
|
{
"content_hash": "2bd12e191681a14494bf9b5978e6beeb",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 195,
"avg_line_length": 60.69090909090909,
"alnum_prop": 0.543139604553625,
"repo_name": "michelesr/coding-events",
"id": "43fc7615dcae80d8d4f0740b655a315b45074433",
"size": "10038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/migrations/0008_update_eventtheme_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "258549"
},
{
"name": "HTML",
"bytes": "92229"
},
{
"name": "JavaScript",
"bytes": "150254"
},
{
"name": "Python",
"bytes": "228805"
},
{
"name": "Ruby",
"bytes": "41"
},
{
"name": "Shell",
"bytes": "3554"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import pdb
import sys
import traceback
import gdb
import pwndbg.color.message as message
import pwndbg.config
import pwndbg.memoize
import pwndbg.stdio
# Prefer ipdb (tab completion, highlighting) for post-mortem debugging when it
# is installed; otherwise keep the stdlib pdb imported above.
try:
    import ipdb as pdb
except ImportError:
    pass

# User-facing toggles, settable from the GDB prompt via `set exception-...`.
# Fixed typo in the help text: "stacktracefor" -> "stacktrace for".
verbose = pwndbg.config.Parameter('exception-verbose', False, 'whether to print a full stacktrace for exceptions raised in Pwndbg commands')
debug = pwndbg.config.Parameter('exception-debugger', False, 'whether to debug exceptions raised in Pwndbg commands')
@pwndbg.memoize.forever
def inform_report_issue(exception_msg):
    """Print a one-time note telling the user where to report an issue.

    Memoized forever, so the note is shown only once per distinct exception
    message (the argument serves purely as the memoization key).
    """
    notice_head = message.notice(
        "If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\n"
        "(Please don't forget to search if it hasn't been reported before)\n"
        "To generate the report and open a browser, you may run "
    )
    hint_cmd = message.hint("`bugreport --run-browser`")
    notice_tail = message.notice("\nPS: Pull requests are welcome")
    print(notice_head + hint_cmd + notice_tail)
def handle(name='Error'):
    """Display an exception to the user, optionally showing a full traceback
    and spawning an interactive post-mortem debugger.

    Args:
        name: Label identifying the failing command in the error line.

    Notes:
        - ``set exception-verbose on`` enables stack traces.
        - ``set exception-debugger on`` enables the post-mortem debugger.
    """
    # This is for unit tests so they fail on exceptions instead of displaying them.
    if getattr(sys, '_pwndbg_unittest_run', False) is True:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        exc = exc_type(exc_value)
        exc.__traceback__ = exc_traceback
        raise exc
    # Display the error (full traceback when verbose/debug, one-liner otherwise).
    if debug or verbose:
        exception_msg = traceback.format_exc()
        print(exception_msg)
        inform_report_issue(exception_msg)
    else:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        # Fixed user-facing typo: "occured" -> "occurred".
        print(message.error('Exception occurred: {}: {} ({})'.format(name, exc_value, exc_type)))
        print(message.notice('For more info invoke `') +
              message.hint('set exception-verbose on') +
              message.notice('` and rerun the command\nor debug it by yourself with `') +
              message.hint('set exception-debugger on') +
              message.notice('`'))
    # Break into the interactive debugger on the "real" stdio.
    if debug:
        with pwndbg.stdio.stdio:
            pdb.post_mortem()
@functools.wraps(pdb.set_trace)
def set_trace():
    """Enable sane debugging in Pwndbg by switching to the "real" stdio.
    """
    # Bind the debugger to GDB's actual stdin/stdout so the prompt works even
    # while Pwndbg has stdio wrapped; skip our wrapper modules so the debugger
    # stops in the caller's code rather than in pwndbg plumbing.
    debugger = pdb.Pdb(stdin=sys.__stdin__,
                       stdout=sys.__stdout__,
                       skip=['pwndbg.stdio', 'pwndbg.exception'])
    debugger.set_trace()
# Monkey-patch so any later `pdb.set_trace()` call uses the stdio-aware
# variant above (pdb may be ipdb here, if it imported successfully).
pdb.set_trace = set_trace
@pwndbg.config.Trigger([verbose, debug])
def update():
    """Keep GDB's python stack-trace verbosity in sync with our settings."""
    want_full = verbose or debug
    command = 'set python print-stack full' if want_full else 'set python print-stack message'
    gdb.execute(command, from_tty=True, to_string=True)
|
{
"content_hash": "31bbfb2d2dcf76c827c4cf8437abaf02",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 139,
"avg_line_length": 31.36,
"alnum_prop": 0.6549744897959183,
"repo_name": "cebrusfs/217gdb",
"id": "069642eafc65a95afc57e95bdaf61ed251bbb04f",
"size": "3182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pwndbg/exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Go",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Python",
"bytes": "1824522"
},
{
"name": "Shell",
"bytes": "6068"
}
],
"symlink_target": ""
}
|
import pickle
import json
import gzip
import math
from sniffles import sv
class SNFile:
    """Writer/reader for .snf files: SV candidates grouped into fixed-size
    genomic position blocks, each pickled and gzip-compressed, indexed by
    byte offset after a one-line JSON header.

    NOTE(review): blocks are deserialized with pickle.loads, which can
    execute arbitrary code -- only read .snf files from trusted sources.
    """
    def __init__(self,config,handle,filename=None):
        # config supplies snf_block_size and the coverage bin sizes;
        # handle is an open binary file object used for reading or writing.
        self.config=config
        self.handle=handle
        self.filename=filename
        self.blocks={}
        self.index={}
        self.total_length=0
    def store(self,svcand):
        """Place an SV candidate into the block covering its position."""
        # Round the candidate position down to its block start.
        block_index=int(svcand.pos/self.config.snf_block_size)*self.config.snf_block_size
        if not block_index in self.blocks:
            self.blocks[block_index]={svtype: [] for svtype in sv.TYPES}
            self.blocks[block_index]["_COVERAGE"]={}
        if not self.config.output_rnames:
            # Drop read names when they are not wanted in the output.
            svcand.rnames=None
        self.blocks[block_index][svcand.svtype].append(svcand)
    def annotate_block_coverages(self,lead_provider,resolution=500):
        """Aggregate the provider's fwd/rev coverage tables into per-block
        coverage bins.

        Walks coverage bins from the provider's minimum bin to its end,
        keeping running fwd/rev coverage; every coverage_binsize_combine
        boundary the rounded-up average is written into the matching
        block's "_COVERAGE" table (zero averages are skipped).
        """
        config=self.config
        start_bin=lead_provider.covrtab_min_bin
        end_bin=int(lead_provider.end/config.coverage_binsize)*config.coverage_binsize
        coverage_fwd=0
        coverage_rev=0
        coverage_sum=0
        bin_count=0
        coverage_binsize_combine=self.config.coverage_binsize_combine
        snf_block_size=config.snf_block_size
        for bin in range(start_bin, end_bin+config.coverage_binsize,config.coverage_binsize):
            # covrtab_* hold deltas per bin; the running totals integrate them.
            if bin in lead_provider.covrtab_fwd:
                coverage_fwd+=lead_provider.covrtab_fwd[bin]
            if bin in lead_provider.covrtab_rev:
                coverage_rev+=lead_provider.covrtab_rev[bin]
            coverage_sum+=coverage_fwd+coverage_rev
            bin_count+=1
            if bin%coverage_binsize_combine==0:
                block_index=int(bin/snf_block_size)*snf_block_size
                coverage_total_curr=math.ceil(coverage_sum/float(bin_count))
                if coverage_total_curr > 0:
                    if not block_index in self.blocks:
                        self.blocks[block_index]={svtype: [] for svtype in sv.TYPES}
                        self.blocks[block_index]["_COVERAGE"]={}
                    self.blocks[block_index]["_COVERAGE"][bin]=coverage_total_curr
                coverage_sum=0
                bin_count=0
    def serialize_block(self,block_id):
        # Pickle the whole block dict (svtype -> candidates, plus coverage).
        return pickle.dumps(self.blocks[block_id])
    def unserialize_block(self,data):
        # NOTE(review): pickle.loads on file contents -- trusted input only.
        return pickle.loads(data)
    def write_and_index(self):
        """Compress and append each block to the handle in sorted order,
        recording (offset, length) per block id in self.index."""
        offset=0
        for block_id in sorted(self.blocks):
            data=gzip.compress(self.serialize_block(block_id))
            self.handle.write(data)
            data_len=len(data)
            self.index[block_id]=(offset,data_len)
            offset+=data_len
            self.total_length+=data_len
    def read_header(self):
        """Read the one-line JSON header and adopt its index.

        The read-side index is nested contig -> block_index -> entries,
        presumably produced by a later merge step -- note it differs in
        shape from the flat index write_and_index builds here.
        """
        try:
            header_text=self.handle.readline()
            self.header_length=len(header_text)
            self.header=json.loads(header_text.strip())
        except Exception as e:
            print(f"Error when reading SNF header from '{self.filename}': {e}. The file may not be a valid .snf file or could have been corrupted.")
            raise e
        self.index=self.header["index"]
    def read_blocks(self,contig,block_index):
        """Return the list of decoded blocks for contig/block_index, or
        None when the index has no entry for them."""
        block_index=str(block_index)
        if not contig in self.index:
            return None
        if not block_index in self.index[contig]:
            return None
        blocks=[]
        for block_data_start,block_data_length in self.index[contig][block_index]:
            try:
                # Offsets in the index are relative to the end of the header.
                self.handle.seek(self.header_length+block_data_start)
                data=gzip.decompress(self.handle.read(block_data_length))
                blocks.append(self.unserialize_block(data))
            except Exception as e:
                print(f"Error when reading block '{contig}.{block_index}' from '{self.filename}': {e}. The file may not be a valid .snf file or could have been corrupted.")
                raise e
        return blocks
    def get_index(self):
        return self.index
    def get_total_length(self):
        return self.total_length
    def close(self):
        self.handle.close()
|
{
"content_hash": "d8ca2df8360d4a468123063f703e708f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 172,
"avg_line_length": 35.208695652173915,
"alnum_prop": 0.6033588540380341,
"repo_name": "fritzsedlazeck/Sniffles",
"id": "6fea3235a9339384e120560b7d9b2bbcd3b4b38d",
"size": "4240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sniffles/snf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "166782"
}
],
"symlink_target": ""
}
|
from django.core.files.base import File
from django.core.files.storage import FileSystemStorage, Storage
from django.utils.deconstruct import deconstructible
@deconstructible
class DummyExternalStorage(Storage):
    """Test storage that behaves like an external (remote) backend -- no
    local filesystem paths -- while actually persisting files through a
    wrapped FileSystemStorage.
    """
    def __init__(self, *args, **kwargs):
        self.wrapped = FileSystemStorage(*args, **kwargs)
    def path(self, name):
        # Overridden to give it the behaviour of the base Storage class
        # This is what an external storage backend would have
        raise NotImplementedError("This backend doesn't support absolute paths.")
    def _open(self, name, mode='rb'):
        # Overridden to return a DummyExternalStorageFile instead of a normal
        # File object
        return DummyExternalStorageFile(open(self.wrapped.path(name), mode))
    # Wrap all other functions: everything below delegates straight to the
    # wrapped filesystem storage.
    def _save(self, name, content):
        return self.wrapped._save(name, content)
    def delete(self, name):
        self.wrapped.delete(name)
    def exists(self, name):
        return self.wrapped.exists(name)
    def listdir(self, path):
        return self.wrapped.listdir(path)
    def size(self, name):
        return self.wrapped.size(name)
    def url(self, name):
        return self.wrapped.url(name)
    def accessed_time(self, name):
        return self.wrapped.accessed_time(name)
    def created_time(self, name):
        return self.wrapped.created_time(name)
    def modified_time(self, name):
        return self.wrapped.modified_time(name)
class DummyExternalStorageError(Exception):
    """Storage-specific error raised by DummyExternalStorageFile.size()."""
    pass
class DummyExternalStorageFile(File):
    """File wrapper that refuses to reopen from the local filesystem,
    mimicking a file handed out by an external storage backend."""
    def open(self, mode=None):
        # Based on:
        # https://github.com/django/django/blob/2c39f282b8389f47fee4b24e785a58567c6c3629/django/core/files/base.py#L135-L141
        # I've commented out two lines of this function which stops it checking
        # the filesystem for the file. Making it behave as if it is using an
        # external file storage.
        if not self.closed:
            self.seek(0)
        # elif self.name and os.path.exists(self.name):
        #     self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")
    def size(self):
        # Surface any underlying failure as the storage-specific error type
        # rather than leaking the original exception class.
        try:
            return super().size
        except Exception as e:
            raise DummyExternalStorageError(str(e))
|
{
"content_hash": "477f9e40ed0676a5e7604cc30cf48fda",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 124,
"avg_line_length": 31.08,
"alnum_prop": 0.6653796653796654,
"repo_name": "torchbox/wagtail",
"id": "b9dcca47e16ebd568b325f44a0c39b9d2ce3db76",
"size": "2723",
"binary": false,
"copies": "16",
"ref": "refs/heads/stable/2.15.x",
"path": "wagtail/tests/dummy_external_storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178240"
},
{
"name": "HTML",
"bytes": "307456"
},
{
"name": "JavaScript",
"bytes": "123792"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2786743"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.views.generic.simple import direct_to_template
from django.utils.datastructures import MultiValueDictKeyError
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from forms import DashboardPreferencesForm
from models import DashboardPreferences
@login_required
@csrf_exempt
def set_preferences(request):
    """Serve and validate the dashboard preferences form.

    AJAX POSTs receive a bare 'true'/'false' body; every other request
    falls through to rendering the preferences form template.
    """
    # Look up any existing preferences row; a missing row and a missing
    # 'dashboard_id' POST key are treated identically (no instance yet).
    try:
        instance = DashboardPreferences.objects.get(
            user=request.user,
            dashboard_id=request.POST['dashboard_id'])
    except (DashboardPreferences.DoesNotExist, MultiValueDictKeyError):
        instance = None
    if request.method != "POST":
        form = DashboardPreferencesForm(user=request.user, dashboard_id="",
                                        instance=instance)
    else:
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=request.POST['dashboard_id'],
            data=request.POST,
            instance=instance)
        if form.is_valid():
            form.save()
            if request.is_ajax():
                return HttpResponse('true')
            request.user.message_set.create(message='Preferences saved')
        elif request.is_ajax():
            return HttpResponse('false')
    return direct_to_template(request, 'admin_tools/dashboard/preferences_form.html', {
        'form': form,
    })
|
{
"content_hash": "328ad9b7edc934d8b9d481d984387290",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 35.68181818181818,
"alnum_prop": 0.6828025477707006,
"repo_name": "noxan/django-admin-tools",
"id": "9e836828289cab84e8d6cef8893d0a4004b706ce",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin_tools/dashboard/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15919"
},
{
"name": "Python",
"bytes": "116972"
},
{
"name": "Shell",
"bytes": "3394"
}
],
"symlink_target": ""
}
|
"""Page object models for the info widget of the object"""
from lib import base
from lib.constants import locator
from lib.element import widget_info
class Widget(base.Widget):
  """Abstract class for all info widgets.

  Subclasses must point _locator at their widget's locator namespace and
  may set _dropdown_settings_cls to the matching dropdown-settings model.
  """
  _locator = None
  _dropdown_settings_cls = None
  def __init__(self, driver):
    # wait that the elements load before calling super
    self.button_settings = base.Button(
        driver, locator.BaseInfoWidget.BUTTON_SETTINGS)
    self.title = base.Label(driver, self._locator.TITLE)
    self.title_entered = base.Label(
        driver, self._locator.TITLE_ENTERED)
    super(Widget, self).__init__(driver)
    # NOTE(review): assumes the widget URL's last path segment is the
    # object id -- confirm against base.Widget.url.
    self.object_id = self.url.split("/")[-1]
  def press_object_settings(self):
    """Open the object settings dropdown.

    Returns:
        widget_info.DropdownSettings
    """
    self.button_settings.click()
    return self._dropdown_settings_cls(self._driver)
class DashboardInfo(Widget):
  """Model for the dashboard info widget."""
  _locator = locator.Dashboard
  def __init__(self, driver):
    super(DashboardInfo, self).__init__(driver)
    # Create each action button from its (attribute name, locator) pair.
    button_locators = (
        ("button_start_new_program", self._locator.BUTTON_START_NEW_PROGRAM),
        ("button_start_new_audit", self._locator.BUTTON_START_NEW_AUDIT),
        ("button_start_new_workflow", self._locator.BUTTON_START_NEW_WORKFLOW),
        ("button_create_new_object", self._locator.BUTTON_CREATE_NEW_OBJECT),
        ("button_all_objects", self._locator.BUTTON_ALL_OBJECTS))
    for attr_name, btn_locator in button_locators:
      setattr(self, attr_name, base.Button(self._driver, btn_locator))
  def start_new_program(self):
    raise NotImplementedError
  def start_new_audit(self):
    raise NotImplementedError
  def start_new_workflow(self):
    raise NotImplementedError
  def create_new_object(self):
    raise NotImplementedError
  def browse_all_objects(self):
    raise NotImplementedError
class Programs(Widget):
  """Model for program object info widget."""
  _locator = locator.WidgetInfoProgram
  _dropdown_settings_cls = widget_info.Programs
  # Each attribute name maps to the locator constant of the same name in
  # upper case (e.g. "code_entered" -> _locator.CODE_ENTERED).
  _LABEL_FIELDS = (
      "object_review", "submit_for_review", "description",
      "description_entered", "notes", "notes_entered", "manager",
      "manager_entered", "program_url", "program_url_entered", "code",
      "code_entered", "effective_date", "effective_date_entered",
      "stop_date", "stop_date_entered", "primary_contact",
      "primary_contact_entered", "secondary_contact",
      "secondary_contact_entered", "reference_url", "reference_url_entered")
  def __init__(self, driver):
    """
    Args:
        driver (base.CustomDriver)
    """
    super(Programs, self).__init__(driver)
    self.show_advanced = base.Toggle(
        self._driver, self._locator.TOGGLE_SHOW_ADVANCED)
    # activate all fields before creating labels for them
    self.show_advanced.toggle()
    for field_name in self._LABEL_FIELDS:
      field_locator = getattr(self._locator, field_name.upper())
      setattr(self, field_name, base.Label(self._driver, field_locator))
class Workflows(Widget):
  """Model for workflow object info widget"""
  _locator = locator.WidgetInfoWorkflow
class Audits(Widget):
  """Model for audit object info widget"""
  _locator = locator.WidgetInfoAudit
class Assessments(Widget):
  """Model for assessment object info widget"""
  _locator = locator.WidgetInfoAssessment
class Issues(Widget):
  """Model for issue object info widget"""
  _locator = locator.WidgetInfoIssue
class Regulations(Widget):
  """Model for regulation object info widget"""
  _locator = locator.WidgetInfoRegulations
class Policies(Widget):
  """Model for policies object info widget"""
  _locator = locator.WidgetInfoPolicy
class Standards(Widget):
  """Model for standard object info widget"""
  _locator = locator.WidgetInfoStandard
class Contracts(Widget):
  """Model for contract object info widget"""
  _locator = locator.WidgetInfoContract
class Clauses(Widget):
  """Model for clause object info widget"""
  _locator = locator.WidgetInfoClause
class Sections(Widget):
  """Model for section object info widget"""
  _locator = locator.WidgetInfoSection
class Controls(Widget):
  """Model for control object info widget"""
  _locator = locator.WidgetInfoControl
  _dropdown_settings_cls = widget_info.Controls
class Objectives(Widget):
  """Model for objectives object info widget"""
  _locator = locator.WidgetInfoObjective
# NOTE(review): People subclasses base.Widget directly (not the local
# Widget), so it lacks title/settings elements -- confirm this is intended.
class People(base.Widget):
  """Model for people object info widget"""
  _locator = locator.WidgetInfoPeople
class OrgGroups(Widget):
  """Model for org groups object info widget"""
  _locator = locator.WidgetInfoOrgGroup
  _dropdown_settings_cls = widget_info.OrgGroups
class Vendors(Widget):
  """Model for vendors object info widget"""
  _locator = locator.WidgetInfoVendor
class AccessGroup(Widget):
  """Model for access group object info widget"""
  _locator = locator.WidgetInfoAccessGroup
class Systems(Widget):
  """Model for system object info widget"""
  _locator = locator.WidgetInfoSystem
  _dropdown_settings_cls = widget_info.Systems
class Processes(Widget):
  """Model for process object info widget"""
  _locator = locator.WidgetInfoProcess
  _dropdown_settings_cls = widget_info.Processes
class DataAssets(Widget):
  """Model for data asset object info widget"""
  _locator = locator.WidgetInfoDataAsset
  _dropdown_settings_cls = widget_info.DataAssets
class Products(Widget):
  """Model for product object info widget"""
  _locator = locator.WidgetInfoProduct
  _dropdown_settings_cls = widget_info.Products
class Projects(Widget):
  """Model for project object info widget"""
  _locator = locator.WidgetInfoProject
  _dropdown_settings_cls = widget_info.Projects
class Facilities(Widget):
  """Model for facility object info widget"""
  _locator = locator.WidgetInfoFacility
class Markets(Widget):
  """Model for market object info widget"""
  _locator = locator.WidgetInfoMarket
class Risks(Widget):
  """Model for risk object info widget"""
  _locator = locator.WidgetInfoRisk
class Threats(Widget):
  """Model for threat object info widget"""
  _locator = locator.WidgetInfoThreat
|
{
"content_hash": "9cfdb39dab3a9229ed64b0ce003ba675",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 74,
"avg_line_length": 29.149019607843137,
"alnum_prop": 0.7095385443293422,
"repo_name": "andrei-karalionak/ggrc-core",
"id": "413d454a01c2a2c23c26a7bc8f9f0e8df8731038",
"size": "7546",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/selenium/src/lib/page/widget/info_widget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "201100"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1064828"
},
{
"name": "JavaScript",
"bytes": "1786503"
},
{
"name": "Makefile",
"bytes": "7017"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2583715"
},
{
"name": "Shell",
"bytes": "31914"
}
],
"symlink_target": ""
}
|
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.fields
import olympia.amo.models
import olympia.amo.validators
import olympia.users.models
class Migration(migrations.Migration):
    """Initial schema for olympia's users app.

    Auto-generated by Django's makemigrations; limit edits to comments so
    the recorded schema stays exactly reproducible.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Custom user model (AUTH_USER_MODEL) stored in the legacy 'users' table.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('username', models.CharField(default='', max_length=255, unique=True)),
                ('display_name', models.CharField(blank=True, default='', max_length=50, null=True, validators=[django.core.validators.MinLengthValidator(2), olympia.amo.validators.OneOrMorePrintableCharacterValidator()])),
                ('email', models.EmailField(max_length=75, null=True, unique=True)),
                ('averagerating', models.FloatField(null=True)),
                ('biography', models.TextField(blank=True, null=True)),
                ('deleted', models.BooleanField(default=False)),
                ('display_collections', models.BooleanField(default=False)),
                ('homepage', models.URLField(blank=True, default='', max_length=255)),
                ('location', models.CharField(blank=True, default='', max_length=255)),
                ('notes', models.TextField(blank=True, null=True)),
                ('occupation', models.CharField(blank=True, default='', max_length=255)),
                ('picture_type', models.CharField(blank=True, default=None, max_length=75, null=True)),
                ('read_dev_agreement', models.DateTimeField(blank=True, null=True)),
                ('last_login_ip', models.CharField(default='', editable=False, max_length=45)),
                ('email_changed', models.DateTimeField(editable=False, null=True)),
                ('banned', models.DateTimeField(editable=False, null=True)),
                ('is_public', models.BooleanField(db_column='public', default=False)),
                ('fxa_id', models.CharField(blank=True, max_length=128, null=True)),
                ('auth_id', models.PositiveIntegerField(default=olympia.users.models.generate_auth_id, null=True)),
                ('basket_token', models.CharField(blank=True, default='', max_length=128)),
                ('bypass_upload_restrictions', models.BooleanField(default=False)),
                ('reviewer_name', models.CharField(blank=True, default='', max_length=50, null=True, validators=[django.core.validators.MinLengthValidator(2)])),
            ],
            options={
                'db_table': 'users',
            },
            bases=(olympia.amo.models.OnChangeMixin, olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Names users are not allowed to take.
        migrations.CreateModel(
            name='DeniedName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(default='', max_length=255, unique=True)),
            ],
            options={
                'db_table': 'users_denied_name',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Full-domain blocks for disposable email providers.
        migrations.CreateModel(
            name='DisposableEmailDomainRestriction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('domain', models.CharField(help_text='Enter full disposable email domain that should be blocked. Wildcards are not supported: if you need those, or need to match against the entire email and not just the domain part, use "Email user restrictions" instead.', max_length=255, unique=True)),
            ],
            options={
                'db_table': 'users_disposable_email_domain_restriction',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Glob-style email pattern blocks.
        migrations.CreateModel(
            name='EmailUserRestriction',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('email_pattern', models.CharField(help_text='Either enter full domain or email that should be blocked or use glob-style wildcards to match other patterns. E.g "@*.mail.com"\n Please note that we do not include "@" in the match so you should do that in the pattern.', max_length=100, verbose_name='Email Pattern')),
            ],
            options={
                'db_table': 'users_user_email_restriction',
            },
            bases=(olympia.users.models.NormalizeEmailMixin, olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # CIDR-range (IPv4/IPv6) network blocks.
        migrations.CreateModel(
            name='IPNetworkUserRestriction',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('network', olympia.amo.fields.CIDRField(blank=True, help_text='Enter a valid IPv4 or IPv6 CIDR network range, eg. 127.0.0.1/28', null=True)),
            ],
            options={
                'db_table': 'users_user_network_restriction',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Audit log of which restriction blocked a given user.
        migrations.CreateModel(
            name='UserRestrictionHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('restriction', models.PositiveSmallIntegerField(choices=[(0, 'DeveloperAgreementRestriction'), (1, 'DisposableEmailDomainRestriction'), (2, 'EmailUserRestriction'), (3, 'IPNetworkUserRestriction'), (4, 'EmailReputationRestriction'), (5, 'IPReputationRestriction')], default=0)),
                ('ip_address', models.CharField(default='', max_length=45)),
                ('last_login_ip', models.CharField(default='', max_length=45)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='restriction_history', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'created',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Per-user notification opt-in/opt-out flags.
        migrations.CreateModel(
            name='UserNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('notification_id', models.IntegerField()),
                ('enabled', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'users_notifications',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Historical record of a user's previous email addresses.
        migrations.CreateModel(
            name='UserHistory',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('email', models.EmailField(max_length=75)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'users_history',
                'ordering': ('-created',),
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        migrations.AddIndex(
            model_name='userprofile',
            index=models.Index(fields=['created'], name='created'),
        ),
        migrations.AddIndex(
            model_name='userprofile',
            index=models.Index(fields=['fxa_id'], name='users_fxa_id_index'),
        ),
        migrations.AddIndex(
            model_name='usernotification',
            index=models.Index(fields=['user'], name='user_id'),
        ),
        migrations.AddIndex(
            model_name='userhistory',
            index=models.Index(fields=['email'], name='users_history_email'),
        ),
        migrations.AddIndex(
            model_name='userhistory',
            index=models.Index(fields=['user'], name='users_history_user_idx'),
        ),
    ]
|
{
"content_hash": "ff6475fff3a0343f19b3808b3da02ea2",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 333,
"avg_line_length": 59.0625,
"alnum_prop": 0.6055796055796056,
"repo_name": "mozilla/olympia",
"id": "9462eb238b1a9ebc675e6ec7db616f080ef058d9",
"size": "10444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/users/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
"""
Extension to Python Markdown for Embedded Audio.
Basic Example:
>>> import markdown
>>> text = "[podcast]https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio controls=""><source src="https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3" type="audio/mpeg"></source></audio></p>
"""
from nikola.plugin_categories import MarkdownExtension
try:
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
except ImportError:
# No need to catch this, if you try to use this without Markdown,
# the markdown compiler will fail first
Pattern = Extension = object
# Matches [podcast]URL[/podcast]; the URL is captured in the 'url' group.
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'
class PodcastPattern(Pattern):
    """Inline pattern turning [podcast]URL[/podcast] into an <audio> tag."""
    def __init__(self, pattern, configs):
        """Initialize pattern (configs are accepted but unused)."""
        Pattern.__init__(self, pattern)
    def handleMatch(self, m):
        """Build <audio controls><source src=URL type=audio/mpeg></audio>."""
        source_url = m.group('url').strip()
        audio = etree.Element('audio')
        audio.set('controls', '')
        source = etree.SubElement(audio, 'source')
        source.set('src', source_url)
        source.set('type', 'audio/mpeg')
        return audio
class PodcastExtension(MarkdownExtension, Extension):
    """Podcast extension for Markdown."""
    def __init__(self, configs=None):
        """Initialize extension.

        Args:
            configs: optional iterable of (key, value) pairs overriding the
                extension defaults.  May be None (the previous mutable
                default ``{}`` crashed when makeExtension passed None
                explicitly, since ``for key, value in None`` raises).
        """
        # set extension defaults
        self.config = {}
        # Override defaults with user settings; None/empty yields no overrides.
        for key, value in (configs or {}):
            self.setConfig(key, value)
    def extendMarkdown(self, md, md_globals=None):
        """Register the podcast inline pattern on this Markdown instance."""
        podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
        podcast_md_pattern.md = md
        # Priority 175 places the pattern ahead of most built-in patterns.
        md.inlinePatterns.register(podcast_md_pattern, 'podcast', 175)
        md.registerExtension(self)
def makeExtension(configs=None):  # pragma: no cover
    """Build and return the PodcastExtension (Markdown entry point)."""
    extension = PodcastExtension(configs)
    return extension
if __name__ == '__main__':
    # Run the module's doctests (see the usage example in the module docstring).
    import doctest
    doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
                                 doctest.REPORT_NDIFF))
|
{
"content_hash": "d909d9341424ef768c933f920be81b4f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 142,
"avg_line_length": 32.55555555555556,
"alnum_prop": 0.6535836177474402,
"repo_name": "okin/nikola",
"id": "5ffcb210f2ba78c0f428eaf644ab59fbebf4d5ca",
"size": "3641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile/markdown/mdx_podcast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25780"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "1888"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1236378"
},
{
"name": "Shell",
"bytes": "9822"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask import request
#from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask_login import UserMixin,AnonymousUserMixin
from app import login_manager
from app import db
from datetime import datetime
class Follow(db.Model):
    """Association row recording that one user follows another."""
    __tablename__ = 'follows'
    # Composite primary key: the follower and the followed user ids.
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
    # When the follow relationship was created (UTC).
    timestamp = db.Column(db.DateTime, default = datetime.utcnow)
class User(UserMixin, db.Model):
    """Application user account.

    Mixes Flask-Login's ``UserMixin`` into the SQLAlchemy model so
    instances can be used directly as login identities.  Follower and
    followed relationships are modelled through the ``Follow``
    association table (a self-referential many-to-many).
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    firstname = db.Column(db.String(50), nullable=True)
    lastname = db.Column(db.String(50), nullable=True)
    email = db.Column(db.String(50), nullable=True)
    username = db.Column(db.String(64), nullable=True)
    # NOTE(review): the original declared a ``password`` Column here, but
    # it was shadowed by the ``password`` property below before the
    # declarative mapper processed the class, so it was never mapped.
    # Only the hash is stored; the dead Column has been removed.
    password_hash = db.Column(db.String(128), nullable=True)
    confirmed = db.Column(db.Boolean, default=False)
    question = db.relationship("Question", backref="owner", lazy='dynamic')
    location = db.Column(db.String(64), nullable=True)
    about_me = db.Column(db.Text(), nullable=True)
    member_since = db.Column(db.DateTime(), default=datetime.utcnow, nullable=True)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow, nullable=True)
    posts = db.relationship('Posts', backref='author', lazy='dynamic')
    fetchedChat = db.relationship('Chats', backref='messenger', lazy='dynamic')
    followed = db.relationship('Follow', foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic', cascade='all, delete-orphan')
    followers = db.relationship('Follow', foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic', cascade='all, delete-orphan')

    def ping(self):
        """Refresh ``last_seen``; the session commit elsewhere persists it."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Return a Gravatar URL derived from this user's email address."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)

    @property
    def password(self):
        """Write-only credential: reading raises, assigning stores a hash."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def follow(self, user):
        """Start following *user*; no-op when already following."""
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)
            db.session.commit()

    def unfollow(self, user):
        """Stop following *user*; no-op when not currently following."""
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)
            db.session.commit()

    def is_following(self, user):
        """Return True if self follows *user*."""
        return self.followed.filter_by(followed_id=user.id).first() is not None

    def is_followed_by(self, user):
        """Return True if *user* follows self."""
        return self.followers.filter_by(follower_id=user.id).first() is not None
# Another table containing questions of users
class Question(db.Model):
    """A question posted by a user, owning a chat thread of messages."""
    __tablename__ = "questions"
    id = db.Column(db.Integer, primary_key=True)
    questions = db.Column(db.String(500))
    topic = db.Column(db.String(500))
    link = db.Column(db.String)
    # Owning user; exposed on User via the "owner" backref.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    question = db.relationship("Chats", backref="chat_no", lazy='dynamic')
    upvotes = db.Column(db.Integer, nullable=True, default=1)
class Chats(db.Model):
    """A single chat message attached to a question."""
    __tablename__ = "chats"
    id = db.Column(db.Integer, primary_key=True)
    messages = db.Column(db.String)
    time = db.Column(db.String(100))
    # The question this message belongs to ("chat_no" backref).
    chat_id = db.Column(db.Integer, db.ForeignKey('questions.id'))
    sender_name = db.Column(db.String, nullable=True)
    # The user who sent the message ("messenger" backref).
    messenger_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class Posts(db.Model):
    """A free-form post written by a user."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Author; exposed on User via the "author" backref.
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# class Role():
# __tablename__ = 'roles'
# id = db.Column(db.Integer,primary_key = True)
# name = db.Column(db.String(64), unique = True)
# default = db.Column(db.Boolean, default = False, index = True)
# permissions = db.Column(db.Integer)
# users = db.relationship('User', backref = 'role', lazy = 'dynamic')
# def can(self,permissions):
# return self.role is not None and (self.role.permissions & permissions) == permissions
# def is_administrator(self):
# return self.can(Permission.ADMINISTER)
# @staticmethod
# def insert_roles():
# roles = {
# 'User': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES, True),
# 'Moderator': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES | Permission.MODERATE_COMMENTS, False),
# 'Administrator': (0xff, False)
# }
# for r in roles:
# role = Role.query.filter_by(name = r).first()
# if role is None:
# role = Role(name = r)
# role.permissions = roles[r][0]
# role.default = roles[r][1]
# db.session.add(role)
# db.session.commit()
# class Permission:
# FOLLOW = 0x01
# COMMENT = 0x02
# WRITE_ARTICLES = 0x04
# MODERATE_COMMENTS = 0x08
# ADMINISTER = 0x80
# class AnonymousUser(AnonymousUserMixin):
# def can(self,permissions):
# return False
# def is_administrator(self):
# return False
# login_manager.anonymous_user = AnonymousUser
# def generate_confirmation_token(self, expiration = 120):
# s = Serializer(app.config['SERIAL_KEY'],expiration)
# return s.dumps({'confirm' : self.id})
# def confirm(self, token):
# s = Serializer(current_app.config['SECRET_KEY'])
# try:
# data = s.loads(token)
# except:
# return False
# if data.get('confirm') != self.id:
# return False
# self.confirmed = True
# db.session.add(self)
# return True
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User from its (string) primary key."""
    uid = int(user_id)
    return User.query.get(uid)
|
{
"content_hash": "3d196c7318977e28bba6e38a0b092252",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 170,
"avg_line_length": 34.927083333333336,
"alnum_prop": 0.6974351327169699,
"repo_name": "sumedh123/debatify",
"id": "a14e060617ed1a5276ddc95bb31de8b1d10ce84b",
"size": "6706",
"binary": false,
"copies": "1",
"ref": "refs/heads/UI",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "347155"
},
{
"name": "HTML",
"bytes": "102503"
},
{
"name": "JavaScript",
"bytes": "608373"
},
{
"name": "Python",
"bytes": "8393673"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
}
|
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
CData,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
    def assertSelects(self, tags, should_match):
        """Assert that the strings inside *tags* equal *should_match*.

        Helper for tests that build a document of single-string tags and
        then select certain strings by some mechanism.
        """
        found = [tag.string for tag in tags]
        self.assertEqual(found, should_match)
    def assertSelectsIDs(self, tags, should_match):
        """Assert that the 'id' attributes of *tags* equal *should_match*.

        Helper for tests that build a document of tags carrying IDs and
        then select certain tags by some mechanism.
        """
        found = [tag['id'] for tag in tags]
        self.assertEqual(found, should_match)
class TestFind(TreeTest):
    """Basic tests of the find() method.
    find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
    """
    def test_find_tag(self):
        # find() returns only the first matching tag.
        soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
        self.assertEqual(soup.find("b").string, "2")
    def test_unicode_text_find(self):
        # Non-ASCII text can be matched with the text= argument.
        soup = self.soup(u'<h1>Räksmörgås</h1>')
        self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås')
class TestFindAll(TreeTest):
    """Basic tests of the find_all() method."""
    def test_find_all_text_nodes(self):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match.
        self.assertEqual(soup.find_all(text="bar"), [u"bar"])
        # Match any of a number of strings.
        self.assertEqual(
            soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"])
        # Match a regular expression.
        self.assertEqual(soup.find_all(text=re.compile('.*')),
                         [u"Foo", u"bar", u'\xbb'])
        # Match anything.
        self.assertEqual(soup.find_all(text=True),
                         [u"Foo", u"bar", u'\xbb'])
    def test_find_all_limit(self):
        """You can limit the number of items returned by find_all."""
        soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
        self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
        self.assertSelects(soup.find_all('a', limit=1), ["1"])
        # A limit larger than the number of matches returns them all.
        self.assertSelects(
            soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
        # A limit of 0 means no limit.
        self.assertSelects(
            soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
    def test_calling_a_tag_is_calling_findall(self):
        # Calling a soup or Tag object directly delegates to find_all().
        soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
        self.assertSelects(soup('a', limit=1), ["1"])
        self.assertSelects(soup.b(id="foo"), ["3"])
    def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
        soup = self.soup("<a></a>")
        # Create a self-referential list.
        l = []
        l.append(l)
        # Without special code in _normalize_search_value, this would cause infinite
        # recursion.
        self.assertEqual([], soup.find_all(l))
class TestFindAllBasicNamespaces(TreeTest):
    def test_find_by_namespaced_name(self):
        """Namespace-prefixed tag and attribute names are searchable."""
        markup = '<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">'
        soup = self.soup(markup)
        self.assertEqual("4", soup.find("mathml:msqrt").string)
        self.assertEqual("a", soup.find(attrs={"svg:fill": "red"}).name)
class TestFindAllByName(TreeTest):
    """Test ways of finding tags by tag name."""
    def setUp(self):
        # Anchor super() at this class.  The original passed TreeTest,
        # which resolved to SoupTest and skipped TreeTest in the MRO;
        # harmless only because TreeTest defines no setUp of its own.
        super(TestFindAllByName, self).setUp()
        self.tree = self.soup("""<a>First tag.</a>
                                 <b>Second tag.</b>
                                 <c>Third <a>Nested tag.</a> tag.</c>""")
    def test_find_all_by_tag_name(self):
        # Find all the <a> tags.
        self.assertSelects(
            self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_name_and_text(self):
        # Name and containing-string criteria can be combined.
        self.assertSelects(
            self.tree.find_all('a', text='First tag.'), ['First tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=re.compile("tag")),
            ['First tag.', 'Nested tag.'])
    def test_find_all_on_non_root_element(self):
        # You can call find_all on any node, not just the root.
        self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
    def test_calling_element_invokes_find_all(self):
        self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_strainer(self):
        self.assertSelects(
            self.tree.find_all(SoupStrainer('a')),
            ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_names(self):
        self.assertSelects(
            self.tree.find_all(['a', 'b']),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_dict(self):
        self.assertSelects(
            self.tree.find_all({'a' : True, 'b' : True}),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_re(self):
        self.assertSelects(
            self.tree.find_all(re.compile('^[ab]$')),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_with_tags_matching_method(self):
        # You can define an oracle method that determines whether
        # a tag matches the search.
        def id_matches_name(tag):
            return tag.name == tag.get('id')
        tree = self.soup("""<a id="a">Match 1.</a>
                            <a id="1">Does not match.</a>
                            <b id="b">Match 2.</a>""")
        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
    """Tests of finding tags by attribute value: keyword arguments,
    attrs dicts, CSS classes, regular expressions, lists, and
    containing strings."""
    def test_find_all_by_attribute_name(self):
        # You can pass in keyword arguments to find_all to search by
        # attribute.
        tree = self.soup("""
                         <a id="first">Matching a.</a>
                         <a id="second">
                          Non-matching <b id="first">Matching b.</b>a.
                         </a>""")
        self.assertSelects(tree.find_all(id='first'),
                           ["Matching a.", "Matching b."])
    def test_find_all_by_utf8_attribute_value(self):
        # Byte strings and unicode strings both match the same value.
        peace = u"םולש".encode("utf8")
        data = u'<a title="םולש"></a>'.encode("utf8")
        soup = self.soup(data)
        self.assertEqual([soup.a], soup.find_all(title=peace))
        self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
        self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))
    def test_find_all_by_attribute_dict(self):
        # You can pass in a dictionary as the argument 'attrs'. This
        # lets you search for attributes like 'name' (a fixed argument
        # to find_all) and 'class' (a reserved word in Python.)
        tree = self.soup("""
                         <a name="name1" class="class1">Name match.</a>
                         <a name="name2" class="class2">Class match.</a>
                         <a name="name3" class="class3">Non-match.</a>
                         <name1>A tag called 'name1'.</name1>
                         """)
        # This doesn't do what you want.
        self.assertSelects(tree.find_all(name='name1'),
                           ["A tag called 'name1'."])
        # This does what you want.
        self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
                           ["Name match."])
        # Passing class='class2' would cause a syntax error.
        self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
                           ["Class match."])
    def test_find_all_by_class(self):
        # Passing in a string to 'attrs' will search the CSS class.
        tree = self.soup("""
                         <a class="1">Class 1.</a>
                         <a class="2">Class 2.</a>
                         <b class="1">Class 1.</b>
                         <c class="3 4">Class 3 and 4.</c>
                         """)
        self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
        self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
        self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
    def test_find_by_class_when_multiple_classes_present(self):
        tree = self.soup("<gar class='foo bar'>Found it</gar>")
        # A regexp matches if it matches any one of the classes.
        attrs = { 'class' : re.compile("o") }
        f = tree.find_all("gar", attrs=attrs)
        self.assertSelects(f, ["Found it"])
        f = tree.find_all("gar", re.compile("a"))
        self.assertSelects(f, ["Found it"])
        # Since the class is not the string "foo bar", but the two
        # strings "foo" and "bar", this will not find anything.
        attrs = { 'class' : re.compile("o b") }
        f = tree.find_all("gar", attrs=attrs)
        self.assertSelects(f, [])
    def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
        soup = self.soup("<a class='bar'>Found it</a>")
        self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
        # A callable passed as attrs is applied to the class value.
        def big_attribute_value(value):
            return len(value) > 3
        self.assertSelects(soup.find_all("a", big_attribute_value), [])
        def small_attribute_value(value):
            return len(value) <= 3
        self.assertSelects(
            soup.find_all("a", small_attribute_value), ["Found it"])
    def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
        soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
        a, a2 = soup.find_all("a")
        self.assertEqual([a, a2], soup.find_all("a", "foo"))
        self.assertEqual([a], soup.find_all("a", "bar"))
        # If you specify the attribute as a string that contains a
        # space, only that specific value will be found.
        self.assertEqual([a], soup.find_all("a", "foo bar"))
        self.assertEqual([], soup.find_all("a", "bar foo"))
    def test_find_all_by_attribute_soupstrainer(self):
        tree = self.soup("""
                         <a id="first">Match.</a>
                         <a id="second">Non-match.</a>""")
        strainer = SoupStrainer(attrs={'id' : 'first'})
        self.assertSelects(tree.find_all(strainer), ['Match.'])
    def test_find_all_with_missing_atribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that do not have that attribute set.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
    def test_find_all_with_defined_attribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that have that attribute set to any value.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(
            tree.find_all(id=True), ["ID present.", "ID is empty."])
    def test_find_all_with_numeric_attribute(self):
        # If you search for a number, it's treated as a string.
        tree = self.soup("""<a id=1>Unquoted attribute.</a>
                            <a id="1">Quoted attribute.</a>""")
        expected = ["Unquoted attribute.", "Quoted attribute."]
        self.assertSelects(tree.find_all(id=1), expected)
        self.assertSelects(tree.find_all(id="1"), expected)
    def test_find_all_with_list_attribute_values(self):
        # You can pass a list of attribute values instead of just one,
        # and you'll get tags that match any of the values.
        tree = self.soup("""<a id="1">1</a>
                            <a id="2">2</a>
                            <a id="3">3</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=["1", "3", "4"]),
                           ["1", "3"])
    def test_find_all_with_regular_expression_attribute_value(self):
        # You can pass a regular expression as an attribute value, and
        # you'll get tags whose values for that attribute match the
        # regular expression.
        tree = self.soup("""<a id="a">One a.</a>
                            <a id="aa">Two as.</a>
                            <a id="ab">Mixed as and bs.</a>
                            <a id="b">One b.</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=re.compile("^a+$")),
                           ["One a.", "Two as."])
    def test_find_by_name_and_containing_string(self):
        soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
        a = soup.a
        self.assertEqual([a], soup.find_all("a", text="foo"))
        # (The original repeated the next assertion twice; once suffices.)
        self.assertEqual([], soup.find_all("a", text="bar"))
    def test_find_by_name_and_containing_string_when_string_is_buried(self):
        soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
        self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
    def test_find_by_attribute_and_containing_string(self):
        soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
        a = soup.a
        self.assertEqual([a], soup.find_all(id=2, text="foo"))
        self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
    """Tests for Tag.index()."""
    def test_index(self):
        """index() finds a child by identity and raises ValueError on a miss."""
        tree = self.soup("""<div>
            <a>Identical</a>
            <b>Not identical</b>
            <a>Identical</a>
            <c><d>Identical with child</d></c>
            <b>Also not identical</b>
            <c><d>Identical with child</d></c>
            </div>""")
        div = tree.div
        # Even equal-looking siblings resolve to their own position,
        # because index() compares by identity rather than equality.
        for position, child in enumerate(div.contents):
            self.assertEqual(position, div.index(child))
        self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
    """Test navigation and searching through an element's parents."""
    def setUp(self):
        super(TestParentOperations, self).setUp()
        self.tree = self.soup('''<ul id="empty"></ul>
                                 <ul id="top">
                                  <ul id="middle">
                                   <ul id="bottom">
                                    <b>Start here</b>
                                   </ul>
                                  </ul>''')
        # All tests navigate upward from the innermost <b> tag.
        self.start = self.tree.b
    def test_parent(self):
        # .parent climbs the nesting one level at a time.
        self.assertEqual(self.start.parent['id'], 'bottom')
        self.assertEqual(self.start.parent.parent['id'], 'middle')
        self.assertEqual(self.start.parent.parent.parent['id'], 'top')
    def test_parent_of_top_tag_is_soup_object(self):
        top_tag = self.tree.contents[0]
        self.assertEqual(top_tag.parent, self.tree)
    def test_soup_object_has_no_parent(self):
        self.assertEqual(None, self.tree.parent)
    def test_find_parents(self):
        # find_parents yields matches from innermost to outermost.
        self.assertSelectsIDs(
            self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
        self.assertSelectsIDs(
            self.start.find_parents('ul', id="middle"), ['middle'])
    def test_find_parent(self):
        self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
    def test_parent_of_text_element(self):
        # NavigableStrings have parents too.
        text = self.tree.find(text="Start here")
        self.assertEqual(text.parent.name, 'b')
    def test_text_element_find_parent(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.find_parent('ul')['id'], 'bottom')
    def test_parent_generator(self):
        parents = [parent['id'] for parent in self.start.parents
                   if parent is not None and 'id' in parent.attrs]
        self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
    """Shared fixture for next/previous navigation tests: a document
    with three consecutive <b> siblings."""
    def setUp(self):
        # Anchor super() at this class.  The original passed TreeTest,
        # which resolved to SoupTest and skipped TreeTest in the MRO;
        # harmless only because TreeTest defines no setUp of its own.
        super(ProximityTest, self).setUp()
        self.tree = self.soup(
            '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
    """Tests for next_element / find_next / find_all_next navigation."""
    def setUp(self):
        super(TestNextOperations, self).setUp()
        self.start = self.tree.b
    def test_next(self):
        self.assertEqual(self.start.next_element, "One")
        self.assertEqual(self.start.next_element.next_element['id'], "2")
    def test_next_of_last_item_is_none(self):
        last = self.tree.find(text="Three")
        self.assertEqual(last.next_element, None)
    def test_next_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        self.assertEqual(self.tree.next_element, None)
    def test_find_all_next(self):
        self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
        # (A redundant, unchecked find_all_next(id=3) call was removed.)
        self.assertSelects(self.start.find_all_next(id=3), ["Three"])
    def test_find_next(self):
        self.assertEqual(self.start.find_next('b')['id'], '2')
        self.assertEqual(self.start.find_next(text="Three"), "Three")
    def test_find_next_for_text_element(self):
        # Navigation works starting from a NavigableString as well.
        text = self.tree.find(text="One")
        self.assertEqual(text.find_next("b").string, "Two")
        self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
    def test_next_generator(self):
        start = self.tree.find(text="Two")
        successors = [node for node in start.next_elements]
        # There are two successors: the final <b> tag and its text contents.
        tag, contents = successors
        self.assertEqual(tag['id'], '3')
        self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
    # Mirrors TestNextOperations but walks the chain backwards from the
    # final "Three" text node.
    def setUp(self):
        super(TestPreviousOperations, self).setUp()
        self.end = self.tree.find(text="Three")
    def test_previous(self):
        self.assertEqual(self.end.previous_element['id'], "3")
        self.assertEqual(self.end.previous_element.previous_element, "Two")
    def test_previous_of_first_item_is_none(self):
        first = self.tree.find('html')
        self.assertEqual(first.previous_element, None)
    def test_previous_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        # XXX This is broken!
        #self.assertEqual(self.tree.previous_element, None)
        pass
    def test_find_all_previous(self):
        # The <b> tag containing the "Three" node is the predecessor
        # of the "Three" node itself, which is why "Three" shows up
        # here.
        self.assertSelects(
            self.end.find_all_previous('b'), ["Three", "Two", "One"])
        self.assertSelects(self.end.find_all_previous(id=1), ["One"])
    def test_find_previous(self):
        self.assertEqual(self.end.find_previous('b')['id'], '3')
        self.assertEqual(self.end.find_previous(text="One"), "One")
    def test_find_previous_for_text_element(self):
        # Navigation works starting from a NavigableString as well.
        text = self.tree.find(text="Three")
        self.assertEqual(text.find_previous("b").string, "Three")
        self.assertSelects(
            text.find_all_previous("b"), ["Three", "Two", "One"])
    def test_previous_generator(self):
        start = self.tree.find(text="One")
        predecessors = [node for node in start.previous_elements]
        # There are four predecessors: the <b> tag containing "One"
        # the <body> tag, the <head> tag, and the <html> tag.
        b, body, head, html = predecessors
        self.assertEqual(b['id'], '1')
        self.assertEqual(body.name, "body")
        self.assertEqual(head.name, "head")
        self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
    """Shared fixture for sibling-navigation tests: four top-level
    <span> siblings, three of which contain a nested <span>."""
    def setUp(self):
        super(SiblingTest, self).setUp()
        markup = '''<html>
                    <span id="1">
                     <span id="1.1"></span>
                    </span>
                    <span id="2">
                     <span id="2.1"></span>
                    </span>
                    <span id="3">
                     <span id="3.1"></span>
                    </span>
                    <span id="4"></span>
                    </html>'''
        # All that whitespace looks good but makes the tests more
        # difficult. Get rid of it.  (Raw string: "\s" is an invalid
        # escape sequence in a plain string literal.)
        markup = re.compile(r"\n\s*").sub("", markup)
        self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
    # Navigation starts from the first top-level <span id="1">.
    def setUp(self):
        super(TestNextSibling, self).setUp()
        self.start = self.tree.find(id="1")
    def test_next_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.next_sibling, None)
    def test_next_sibling(self):
        self.assertEqual(self.start.next_sibling['id'], '2')
        self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
        # Note the difference between next_sibling and next_element.
        self.assertEqual(self.start.next_element['id'], '1.1')
    def test_next_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.next_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.next_sibling, None)
        last_span = self.tree.find(id="4")
        self.assertEqual(last_span.next_sibling, None)
    def test_find_next_sibling(self):
        self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
    def test_next_siblings(self):
        self.assertSelectsIDs(self.start.find_next_siblings("span"),
                              ['2', '3', '4'])
        self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
    def test_next_sibling_for_text_element(self):
        # NavigableStrings participate in sibling navigation too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="Foo")
        self.assertEqual(start.next_sibling.name, 'b')
        self.assertEqual(start.next_sibling.next_sibling, 'baz')
        self.assertSelects(start.find_next_siblings('b'), ['bar'])
        self.assertEqual(start.find_next_sibling(text="baz"), "baz")
        self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
    # Navigation starts from the last top-level <span id="4">.
    def setUp(self):
        super(TestPreviousSibling, self).setUp()
        self.end = self.tree.find(id="4")
    def test_previous_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.previous_sibling, None)
    def test_previous_sibling(self):
        self.assertEqual(self.end.previous_sibling['id'], '3')
        self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
        # Note the difference between previous_sibling and previous_element.
        self.assertEqual(self.end.previous_element['id'], '3.1')
    def test_previous_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.previous_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.previous_sibling, None)
        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)
    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
    def test_previous_siblings(self):
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])
        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
    def test_previous_sibling_for_text_element(self):
        # NavigableStrings participate in sibling navigation too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        # A freshly created tag is not attached to any tree.
        self.assertEqual(None, new_tag.parent)
    def test_tag_inherits_self_closing_rules_from_builder(self):
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")
            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())
        html_soup = BeautifulSoup("", "html")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")
        # The HTML builder uses HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        self.assertEqual(b"<br/>", html_br.encode())
        self.assertEqual(b"<p></p>", html_p.encode())
    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, NavigableString))
class TestTreeModification(SoupTest):
def test_attribute_modification(self):
soup = self.soup('<a id="1"></a>')
soup.a['id'] = 2
self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
del(soup.a['id'])
self.assertEqual(soup.decode(), self.document_for('<a></a>'))
soup.a['id2'] = 'foo'
self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
def test_new_tag_creation(self):
builder = builder_registry.lookup('html')()
soup = self.soup("<body></body>", builder=builder)
a = Tag(soup, builder, 'a')
ol = Tag(soup, builder, 'ol')
a['href'] = 'http://foo.com/'
soup.body.insert(0, a)
soup.body.insert(1, ol)
self.assertEqual(
soup.body.encode(),
b'<body><a href="http://foo.com/"></a><ol></ol></body>')
def test_append_to_contents_moves_tag(self):
doc = """<p id="1">Don't leave me <b>here</b>.</p>
<p id="2">Don\'t leave!</p>"""
soup = self.soup(doc)
second_para = soup.find(id='2')
bold = soup.b
# Move the <b> tag to the end of the second paragraph.
soup.find(id='2').append(soup.b)
# The <b> tag is now a child of the second paragraph.
self.assertEqual(bold.parent, second_para)
self.assertEqual(
soup.decode(), self.document_for(
'<p id="1">Don\'t leave me .</p>\n'
'<p id="2">Don\'t leave!<b>here</b></p>'))
def test_replace_with_returns_thing_that_was_replaced(self):
text = "<a></a><b><c></c></b>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with(soup.c)
self.assertEqual(a, new_a)
def test_unwrap_returns_thing_that_was_replaced(self):
text = "<a><b></b><c></c></a>"
soup = self.soup(text)
a = soup.a
new_a = a.unwrap()
self.assertEqual(a, new_a)
def test_replace_tag_with_itself(self):
text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
soup = self.soup(text)
c = soup.c
soup.c.replace_with(c)
self.assertEqual(soup.decode(), self.document_for(text))
def test_replace_tag_with_its_parent_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.b.replace_with, soup.a)
def test_insert_tag_into_itself_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
def test_replace_with_maintains_next_element_throughout(self):
soup = self.soup('<p><a>one</a><b>three</b></p>')
a = soup.a
b = a.contents[0]
# Make it so the <a> tag has two text children.
a.insert(1, "two")
# Now replace each one with the empty string.
left, right = a.contents
left.replaceWith('')
right.replaceWith('')
# The <b> tag is still connected to the tree.
self.assertEqual("three", soup.b.string)
def test_replace_final_node(self):
soup = self.soup("<b>Argh!</b>")
soup.find(text="Argh!").replace_with("Hooray!")
new_text = soup.find(text="Hooray!")
b = soup.b
self.assertEqual(new_text.previous_element, b)
self.assertEqual(new_text.parent, b)
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.next_element, None)
def test_consecutive_text_nodes(self):
# A builder should never create two consecutive text nodes,
# but if you insert one next to another, Beautiful Soup will
# handle it correctly.
soup = self.soup("<a><b>Argh!</b><c></c></a>")
soup.b.insert(1, "Hooray!")
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Argh!Hooray!</b><c></c></a>"))
new_text = soup.find(text="Hooray!")
self.assertEqual(new_text.previous_element, "Argh!")
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.previous_sibling, "Argh!")
self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
self.assertEqual(new_text.next_sibling, None)
self.assertEqual(new_text.next_element, soup.c)
def test_insert_string(self):
soup = self.soup("<a></a>")
soup.a.insert(0, "bar")
soup.a.insert(0, "foo")
# The string were added to the tag.
self.assertEqual(["foo", "bar"], soup.a.contents)
# And they were converted to NavigableStrings.
self.assertEqual(soup.a.contents[0].next_element, "bar")
def test_insert_tag(self):
builder = self.default_builder
soup = self.soup(
"<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
magic_tag = Tag(soup, builder, 'magictag')
magic_tag.insert(0, "the")
soup.a.insert(1, magic_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
# Make sure all the relationships are hooked up correctly.
b_tag = soup.b
self.assertEqual(b_tag.next_sibling, magic_tag)
self.assertEqual(magic_tag.previous_sibling, b_tag)
find = b_tag.find(text="Find")
self.assertEqual(find.next_element, magic_tag)
self.assertEqual(magic_tag.previous_element, find)
c_tag = soup.c
self.assertEqual(magic_tag.next_sibling, c_tag)
self.assertEqual(c_tag.previous_sibling, magic_tag)
the = magic_tag.find(text="the")
self.assertEqual(the.parent, magic_tag)
self.assertEqual(the.next_element, c_tag)
self.assertEqual(c_tag.previous_element, the)
def test_append_child_thats_already_at_the_end(self):
data = "<a><b></b></a>"
soup = self.soup(data)
soup.a.append(soup.b)
self.assertEqual(data, soup.decode())
def test_move_tag_to_beginning_of_parent(self):
data = "<a><b></b><c></c><d></d></a>"
soup = self.soup(data)
soup.a.insert(0, soup.d)
self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())
def test_insert_works_on_empty_element_tag(self):
# This is a little strange, since most HTML parsers don't allow
# markup like this to come through. But in general, we don't
# know what the parser would or wouldn't have allowed, so
# I'm letting this succeed for now.
soup = self.soup("<br/>")
soup.br.insert(1, "Contents")
self.assertEqual(str(soup.br), "<br>Contents</br>")
def test_insert_before(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_before("BAZ")
soup.a.insert_before("QUUX")
self.assertEqual(
soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
soup.a.insert_before(soup.b)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_after("BAZ")
soup.a.insert_after("QUUX")
self.assertEqual(
soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
soup.b.insert_after(soup.a)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after_raises_valueerror_if_after_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_after, tag)
self.assertRaises(ValueError, soup.insert_after, tag)
self.assertRaises(ValueError, tag.insert_after, tag)
def test_insert_before_raises_valueerror_if_before_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_before, tag)
self.assertRaises(ValueError, soup.insert_before, tag)
self.assertRaises(ValueError, tag.insert_before, tag)
def test_replace_with(self):
soup = self.soup(
"<p>There's <b>no</b> business like <b>show</b> business</p>")
no, show = soup.find_all('b')
show.replace_with(no)
self.assertEqual(
soup.decode(),
self.document_for(
"<p>There's business like <b>no</b> business</p>"))
self.assertEqual(show.parent, None)
self.assertEqual(no.parent, soup.p)
self.assertEqual(no.next_element, "no")
self.assertEqual(no.next_sibling, " business")
def test_replace_first_child(self):
data = "<a><b></b><c></c></a>"
soup = self.soup(data)
soup.b.replace_with(soup.c)
self.assertEqual("<a><c></c></a>", soup.decode())
def test_replace_last_child(self):
data = "<a><b></b><c></c></a>"
soup = self.soup(data)
soup.c.replace_with(soup.b)
self.assertEqual("<a><b></b></a>", soup.decode())
def test_nested_tag_replace_with(self):
soup = self.soup(
"""<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
# Replace the entire <b> tag and its contents ("reserve the
# right") with the <f> tag ("refuse").
remove_tag = soup.b
move_tag = soup.f
remove_tag.replace_with(move_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
# The <b> tag is now an orphan.
self.assertEqual(remove_tag.parent, None)
self.assertEqual(remove_tag.find(text="right").next_element, None)
self.assertEqual(remove_tag.previous_element, None)
self.assertEqual(remove_tag.next_sibling, None)
self.assertEqual(remove_tag.previous_sibling, None)
# The <f> tag is now connected to the <a> tag.
self.assertEqual(move_tag.parent, soup.a)
self.assertEqual(move_tag.previous_element, "We")
self.assertEqual(move_tag.next_element.next_element, soup.e)
self.assertEqual(move_tag.next_sibling, None)
# The gap where the <f> tag used to be has been mended, and
# the word "to" is now connected to the <g> tag.
to_text = soup.find(text="to")
g_tag = soup.g
self.assertEqual(to_text.next_element, g_tag)
self.assertEqual(to_text.next_sibling, g_tag)
self.assertEqual(g_tag.previous_element, to_text)
self.assertEqual(g_tag.previous_sibling, to_text)
def test_unwrap(self):
tree = self.soup("""
<p>Unneeded <em>formatting</em> is unneeded</p>
""")
tree.em.unwrap()
self.assertEqual(tree.em, None)
self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
def test_wrap(self):
soup = self.soup("I wish I was bold.")
value = soup.string.wrap(soup.new_tag("b"))
self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
self.assertEqual(
soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
def test_wrap_extracts_tag_from_elsewhere(self):
soup = self.soup("<b></b>I wish I was bold.")
soup.b.next_sibling.wrap(soup.b)
self.assertEqual(
soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
def test_wrap_puts_new_contents_at_the_end(self):
soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
soup.b.next_sibling.wrap(soup.b)
self.assertEqual(2, len(soup.b.contents))
self.assertEqual(
soup.decode(), self.document_for(
"<b>I like being bold.I wish I was bold.</b>"))
def test_extract(self):
soup = self.soup(
'<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
self.assertEqual(len(soup.body.contents), 3)
extracted = soup.find(id="nav").extract()
self.assertEqual(
soup.decode(), "<html><body>Some content. More content.</body></html>")
self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
# The extracted tag is now an orphan.
self.assertEqual(len(soup.body.contents), 2)
self.assertEqual(extracted.parent, None)
self.assertEqual(extracted.previous_element, None)
self.assertEqual(extracted.next_element.next_element, None)
# The gap where the extracted tag used to be has been mended.
content_1 = soup.find(text="Some content. ")
content_2 = soup.find(text=" More content.")
self.assertEqual(content_1.next_element, content_2)
self.assertEqual(content_1.next_sibling, content_2)
self.assertEqual(content_2.previous_element, content_1)
self.assertEqual(content_2.previous_sibling, content_1)
def test_extract_distinguishes_between_identical_strings(self):
soup = self.soup("<a>foo</a><b>bar</b>")
foo_1 = soup.a.string
bar_1 = soup.b.string
foo_2 = soup.new_string("foo")
bar_2 = soup.new_string("bar")
soup.a.append(foo_2)
soup.b.append(bar_2)
# Now there are two identical strings in the <a> tag, and two
# in the <b> tag. Let's remove the first "foo" and the second
# "bar".
foo_1.extract()
bar_2.extract()
self.assertEqual(foo_2, soup.a.string)
self.assertEqual(bar_2, soup.b.string)
def test_clear(self):
"""Tag.clear()"""
soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
# clear using extract()
a = soup.a
soup.p.clear()
self.assertEqual(len(soup.p.contents), 0)
self.assertTrue(hasattr(a, "contents"))
# clear using decompose()
em = a.em
a.clear(decompose=True)
self.assertFalse(hasattr(em, "contents"))
def test_string_set(self):
"""Tag.string = 'string'"""
soup = self.soup("<a></a> <b><c></c></b>")
soup.a.string = "foo"
self.assertEqual(soup.a.contents, ["foo"])
soup.b.string = "bar"
self.assertEqual(soup.b.contents, ["bar"])
def test_string_set_does_not_affect_original_string(self):
soup = self.soup("<a><b>foo</b><c>bar</c>")
soup.b.string = soup.c.string
self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")
def test_set_string_preserves_class_of_string(self):
soup = self.soup("<a></a>")
cdata = CData("foo")
soup.a.string = cdata
self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
"""Test various features of element objects."""
def test_len(self):
"""The length of an element is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
self.assertEqual(len(soup.contents), 1)
self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
self.assertEqual(len(soup.top), 3)
self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
self.assertEqual(soup.b, soup.find('b'))
self.assertEqual(soup.b.i, soup.find('b').find('i'))
self.assertEqual(soup.a, None)
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
self.assertEqual(soup.b, tag)
self.assertEqual(
'.bTag is deprecated, use .find("b") instead.',
str(w[0].message))
    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        self.assertTrue(soup.foo.has_attr('attr'))
        self.assertFalse(soup.foo.has_attr('attr2'))
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
self.assertEqual(soup.b.string, 'foo')
    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)
    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)
        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)
        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)
def test_tag_with_recursive_string_has_string(self):
# A tag with a single child which has a .string inherits that
# .string.
soup = self.soup("<a><b>foo</b></a>")
self.assertEqual(soup.a.string, "foo")
self.assertEqual(soup.string, "foo")
def test_lack_of_string(self):
"""Only a tag containing a single text node has a .string."""
soup = self.soup("<b>f<i>e</i>o</b>")
self.assertFalse(soup.b.string)
soup = self.soup("<b></b>")
self.assertFalse(soup.b.string)
def test_all_text(self):
"""Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
self.assertEqual(soup.a.text, "ar t ")
self.assertEqual(soup.a.get_text(strip=True), "art")
self.assertEqual(soup.a.get_text(","), "a,r, , t ")
self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
class TestCDAtaListAttributes(SoupTest):
    """Testing cdata-list attributes like 'class'.
    """
    # NOTE(review): class name misspells "CData"; left as-is because
    # renaming would change the suite's public identifiers.
    def test_single_value_becomes_list(self):
        # Even a single value is parsed into a one-element list.
        soup = self.soup("<a class='foo'>")
        self.assertEqual(["foo"],soup.a['class'])
    def test_multiple_values_becomes_list(self):
        # Space-separated values are split into a list.
        soup = self.soup("<a class='foo bar'>")
        self.assertEqual(["foo", "bar"], soup.a['class'])
    def test_multiple_values_separated_by_weird_whitespace(self):
        # Tabs and newlines also count as separators.
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        self.assertEqual(["foo", "bar", "baz"],soup.a['class'])
    def test_attributes_joined_into_string_on_output(self):
        # On output the list is joined back together with single spaces.
        soup = self.soup("<a class='foo\tbar'>")
        self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
    def test_accept_charset(self):
        # accept-charset on <form> is also treated as a cdata-list attribute.
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
class TestPersistence(SoupTest):
"Testing features like pickle and deepcopy."
def setUp(self):
super(TestPersistence, self).setUp()
self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
self.tree = self.soup(self.page)
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
dumped = pickle.dumps(self.tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), self.tree.decode())
def test_deepcopy_identity(self):
# Making a deepcopy of a tree yields an identical tree.
copied = copy.deepcopy(self.tree)
self.assertEqual(copied.decode(), self.tree.decode())
def test_unicode_pickle(self):
# A tree containing Unicode characters can be pickled.
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.decode(), soup.decode())
class TestSubstitutions(SoupTest):
def test_default_formatter_is_minimal(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_html(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
self.assertEqual(
decoded,
self.document_for("<b><<Sacré bleu!>></b>"))
def test_formatter_minimal(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_null(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
self.assertEqual(decoded,
self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_custom(self):
markup = u"<b><foo></b><b>bar</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
self.assertEqual(
decoded,
self.document_for(u"<b><FOO></b><b>BAR</b>"))
def test_formatter_is_run_on_attribute_values(self):
markup = u'<a href="http://a.com?a=b&c=é">e</a>'
soup = self.soup(markup)
a = soup.a
expect_minimal = u'<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_minimal, a.decode())
self.assertEqual(expect_minimal, a.decode(formatter="minimal"))
expect_html = u'<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_html, a.decode(formatter="html"))
self.assertEqual(markup, a.decode(formatter=None))
expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>'
self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))
def test_prettify_accepts_formatter(self):
soup = BeautifulSoup("<html><body>foo</body></html>")
pretty = soup.prettify(formatter = lambda x: x.upper())
self.assertTrue("FOO" in pretty)
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
self.assertEqual(unicode, type(soup.prettify()))
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
self.assertEqual(bytes, type(soup.prettify("utf-8")))
def test_html_entity_substitution_off_by_default(self):
markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
self.assertEqual(encoded, markup.encode('utf-8'))
    def test_encoding_substitution(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')
        soup = self.soup(meta_tag)
        # Parse the document, and the charset appears unchanged.
        self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')
        # Encode the document into some encoding, and the encoding is
        # substituted into the meta tag.
        utf_8 = soup.encode("utf-8")
        self.assertTrue(b"charset=utf-8" in utf_8)
        euc_jp = soup.encode("euc_jp")
        self.assertTrue(b"charset=euc_jp" in euc_jp)
        shift_jis = soup.encode("shift-jis")
        self.assertTrue(b"charset=shift-jis" in shift_jis)
        utf_16_u = soup.encode("utf-16").decode("utf-16")
        self.assertTrue("charset=utf-16" in utf_16_u)
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.string.encode("utf-8"),
u"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")
def test_encoding_can_be_made_strict(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertRaises(
UnicodeEncodeError, soup.encode, "ascii", errors="strict")
def test_decode_contents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents())
def test_encode_contents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
encoding="utf8"))
def test_deprecated_renderContents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())
class TestNavigableStringSubclasses(SoupTest):
    """Tests for NavigableString subclasses such as CData and Doctype."""
    def test_cdata(self):
        # None of the current builders turn CDATA sections into CData
        # objects, but you can create them manually.
        soup = self.soup("")
        cdata = CData("foo")
        soup.insert(1, cdata)
        self.assertEqual(str(soup), "<![CDATA[foo]]>")
        self.assertEqual(soup.find(text="foo"), "foo")
        self.assertEqual(soup.contents[0], "foo")
    def test_cdata_is_never_formatted(self):
        """Text inside a CData object is passed into the formatter.
        But the return value is ignored.
        """
        # Counter proves the formatter really was invoked once.
        self.count = 0
        def increment(*args):
            self.count += 1
            return "BITTER FAILURE"
        soup = self.soup("")
        cdata = CData("<><><>")
        soup.insert(1, cdata)
        # The CData text is emitted verbatim; the formatter's return
        # value ("BITTER FAILURE") never appears in the output.
        self.assertEqual(
            b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
        self.assertEqual(1, self.count)
    def test_doctype_ends_in_newline(self):
        # Unlike other NavigableString subclasses, a DOCTYPE always ends
        # in a newline.
        doctype = Doctype("foo")
        soup = self.soup("")
        soup.insert(1, doctype)
        self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<div id="main">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
</span>
</div>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML)
def assertSelects(self, selector, expected_ids):
el_ids = [el['id'] for el in self.soup.select(selector)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, [u'The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 3)
for div in els:
self.assertEqual(div.name, 'div')
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['main', 'inner', 'footer'])
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertEqual(len(self.soup.select('tag%t')), 0)
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse(els[0].has_key('class'))
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
('div[id$="1"]', []),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('a[href*="http://"]', ['bob', 'me']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
('div[id*="1"]', []),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
)
def test_attribute_exact_or_hypen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
)
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner'])
|
{
"content_hash": "06dd8741fa832418be5445a27cdf105f",
"timestamp": "",
"source": "github",
"line_count": 1694,
"max_line_length": 118,
"avg_line_length": 38.31404958677686,
"alnum_prop": 0.5646339208677431,
"repo_name": "yarray/inkpress",
"id": "cc573edef30a03b51c9aba88df197b6947a9fca6",
"size": "64948",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "compiler/lib/bs4/tests/test_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48844"
},
{
"name": "JavaScript",
"bytes": "36902"
},
{
"name": "Python",
"bytes": "352321"
}
],
"symlink_target": ""
}
|
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
import jmespath
@resources.register('cloudbilling-account')
class CloudBillingAccount(QueryResourceManager):
    """Cloud Custodian resource for GCP Cloud Billing accounts.

    Enumerates accounts via the ``cloudbilling`` v1 API's
    ``billingAccounts.list`` method.
    """

    class resource_type(TypeInfo):
        service = 'cloudbilling'  # GCP API service name
        version = 'v1'
        component = 'billingAccounts'
        # (method, jmespath into the list response, extra params)
        enum_spec = ('list', 'billingAccounts[]', None)
        # get() needs the triggering audit-log event, not just an id.
        get_requires_event = True
        # No project scope is applied when listing — presumably because
        # billing accounts live above the project level; TODO confirm.
        scope = None
        name = id = 'name'
        default_report_fields = ['id', 'displayName']
        asset_type = "cloudbilling.googleapis.com/BillingAccount"
        permissions = ('billing.accounts.list',)

    @staticmethod
    def get(client, event):
        """Fetch the billing account named in an audit-log event.

        Pulls the account name out of the event payload and issues a
        ``billingAccounts.get`` call.
        """
        return client.execute_query(
            'get', {'name': jmespath.search(
                'protoPayload.response.billingAccountInfo.billingAccountName', event)})
|
{
"content_hash": "f998dde4504c514f808bde8ef373ca01",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 91,
"avg_line_length": 33.92307692307692,
"alnum_prop": 0.6439909297052154,
"repo_name": "capitalone/cloud-custodian",
"id": "443694fc4c5bbccfa221b82476f15858abce352b",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_gcp/c7n_gcp/resources/cloudbilling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
}
|
# Deprecated compatibility shim: the AES helpers that used to live here
# now live in saml2.cryptography.symmetric; this module re-exports them
# under their old public names.
import warnings as _warnings
from saml2.cryptography.symmetric import AESCipher as _AESCipher
# Warn at import time so downstream code migrates to the new module.
_deprecation_msg = (
    '{name} {type} is deprecated. '
    'It will be removed in the next version. '
    'Use saml2.cryptography.symmetric instead.'
).format(name=__name__, type='module')
_warnings.warn(_deprecation_msg, DeprecationWarning)
# Backwards-compatible aliases for the old public names.
AESCipher = _AESCipher
POSTFIX_MODE = _AESCipher.POSTFIX_MODE
AES_BLOCK_SIZE = _AESCipher.AES_BLOCK_SIZE
|
{
"content_hash": "b92d56cead355b8feb1c4ec31a0623bc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 28,
"alnum_prop": 0.7388392857142857,
"repo_name": "cloudera/hue",
"id": "6f37fb336824406f5261f4c2c514f9554b503e5f",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pysaml2-4.9.0/src/saml2/aes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import argparse
import collections
import logging
import os
import shutil
import signal
import StringIO
import subprocess
import sys
import tempfile
import threading
import time
import cdec
import aligner
import decoder
import util
# Dummy input token that is unlikely to appear in normalized data (but no fatal errors if it does)
LIKELY_OOV = '(OOV)'
# Accepted spellings of a true boolean value when parsing rt.ini options
TRUE = ('true', 'True', 'TRUE')
# Shared module logger used by all realtime components in this file
logger = logging.getLogger('rt')
class ExtractorWrapper:
    '''Wrap cdec.sa.GrammarExtractor. Used to keep multiple instances of the extractor from causing Python to segfault.
    Do not use directly unless you know what you're doing.

    The extractor runs as a child process. Each request is a single
    '|||'-delimited line written to its stdin followed by one
    acknowledgement line read back from its stdout; a FIFO lock
    serializes requests so concurrent callers cannot interleave.'''
    def __init__(self, config):
        # Make sure pycdec is on PYTHONPATH
        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        pycdec = os.path.join(cdec_root, 'python')
        env = os.environ.copy()
        python_path = env.get('PYTHONPATH', '')
        if 'cdec/python' not in python_path:
            python_path = '{}:{}'.format(python_path, pycdec) if len(python_path) > 0 else pycdec
            env['PYTHONPATH'] = python_path
        # Start grammar extractor as separate process using stdio
        cmd = ['python', '-m', 'cdec.sa.extract', '-o', '-z', '-c', config, '-t']
        logger.info('Executing: {}'.format(' '.join(cmd)))
        self.p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        # Drain stderr in the background so the child cannot block on a full pipe
        util.consume_stream(self.p.stderr)
        self.lock = util.FIFOLock()
    def close(self, force=False):
        '''Shut down the extractor.
        Graceful (default): wait for in-flight requests, then close stdin
        so the child exits. Forced: SIGTERM immediately, skipping the lock.'''
        if not force:
            self.lock.acquire()
            self.p.stdin.close()
            self.p.wait()
            self.lock.release()
        else:
            os.kill(self.p.pid, signal.SIGTERM)
    def drop_ctx(self, ctx_name):
        '''Tell the extractor to discard all data for context ctx_name.'''
        self.lock.acquire()
        self.p.stdin.write('{} ||| drop\n'.format(ctx_name))
        # Wait for the child's acknowledgement line before releasing
        self.p.stdout.readline()
        self.lock.release()
    def grammar(self, sentence, grammar_file, ctx_name):
        '''Ask the extractor to write a sentence-level grammar to grammar_file.'''
        self.lock.acquire()
        self.p.stdin.write('{} ||| {} ||| {}\n'.format(ctx_name, sentence, grammar_file))
        self.p.stdout.readline()
        self.lock.release()
    def add_instance(self, source, target, alignment, ctx_name):
        '''Add one aligned sentence pair to the context's bitext.'''
        self.lock.acquire()
        self.p.stdin.write('{} ||| {} ||| {} ||| {}\n'.format(ctx_name, source, target, alignment))
        self.p.stdout.readline()
        self.lock.release()
class RealtimeDecoder:
    '''Per-context decoding backend: owns a temp dir, an optional HPYPLM
    reference fifo, and a MIRA decoder instance.
    Do not use directly unless you know what you're doing. Use RealtimeTranslator.'''
    def __init__(self, configdir, tmpdir, hpyplm=False, metric='ibm_bleu'):
        '''Create a decoder.
        configdir: directory containing cdec.ini and weights.final
        tmpdir: directory this object creates and owns (removed by close())
        hpyplm: if True, open a fifo through which references are fed to HPYPLM
        metric: optimization metric name passed to the MIRA decoder'''
        self.tmp = tmpdir
        os.mkdir(self.tmp)
        # HPYPLM reference stream
        self.hpyplm = hpyplm
        if self.hpyplm:
            ref_fifo_file = os.path.join(self.tmp, 'ref.fifo')
            os.mkfifo(ref_fifo_file)
            self.ref_fifo = open(ref_fifo_file, 'w+')
            # Start with empty line (do not learn prior to first input)
            self.ref_fifo.write('\n')
            self.ref_fifo.flush()
        # Decoder: rewrite cdec.ini for realtime use into our temp dir,
        # then start the MIRA decoder against it.
        decoder_config = [[f.strip() for f in line.split('=')] for line in open(os.path.join(configdir, 'cdec.ini'))]
        util.cdec_ini_for_realtime(decoder_config, os.path.abspath(configdir), ref_fifo_file if self.hpyplm else None)
        decoder_config_file = os.path.join(self.tmp, 'cdec.ini')
        with open(decoder_config_file, 'w') as output:
            for (k, v) in decoder_config:
                output.write('{}={}\n'.format(k, v))
        decoder_weights = os.path.join(configdir, 'weights.final')
        self.decoder = decoder.MIRADecoder(decoder_config_file, decoder_weights, metric=metric)
    def close(self, force=False):
        '''Stop the decoder, close the reference fifo (if any), and delete
        this context's temp dir.'''
        logger.info('Closing decoder and removing {}'.format(self.tmp))
        self.decoder.close(force)
        if self.hpyplm:
            self.ref_fifo.close()
        shutil.rmtree(self.tmp)
class RealtimeTranslator:
    '''Main entry point into API: serves translations to any number of concurrent users.

    Shared across all contexts: one aligner, one grammar extractor, one
    optional tokenizer/detokenizer pair. Per context ("user"): one decoder,
    one grammar cache, one FIFO lock making that context's operations atomic.'''
    def __init__(self, configdir, tmpdir='/tmp', cache_size=5, norm=False):
        # name -> (method, set of possible nargs)
        self.COMMANDS = {
            'TR': (self.translate, set((1,))),
            'LEARN': (self.learn, set((2,))),
            'SAVE': (self.save_state, set((0, 1))),
            'LOAD': (self.load_state, set((0, 1))),
            'DROP': (self.drop_ctx, set((0,))),
            'LIST': (self.list_ctx, set((0,))),
        }
        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        # rt.ini options
        ini = dict(line.strip().split('=') for line in open(os.path.join(configdir, 'rt.ini')))
        self.hpyplm = (ini.get('hpyplm', 'false') in TRUE)
        self.metric = ini.get('metric', 'ibm_bleu')
        ### Single instance for all contexts
        self.config = configdir
        # Temporary work dir
        self.tmp = tempfile.mkdtemp(dir=tmpdir, prefix='realtime.')
        logger.info('Using temp dir {}'.format(self.tmp))
        # Normalization
        self.norm = norm
        if self.norm:
            self.tokenizer = util.popen_io([os.path.join(cdec_root, 'corpus', 'tokenize-anything.sh'), '-u'])
            self.tokenizer_lock = util.FIFOLock()
            self.detokenizer = util.popen_io([os.path.join(cdec_root, 'corpus', 'untok.pl')])
            self.detokenizer_lock = util.FIFOLock()
        # Word aligner
        fwd_params = os.path.join(configdir, 'a.fwd_params')
        fwd_err = os.path.join(configdir, 'a.fwd_err')
        rev_params = os.path.join(configdir, 'a.rev_params')
        rev_err = os.path.join(configdir, 'a.rev_err')
        self.aligner = aligner.ForceAligner(fwd_params, fwd_err, rev_params, rev_err)
        # Grammar extractor
        sa_config = cdec.configobj.ConfigObj(os.path.join(configdir, 'sa.ini'), unrepr=True)
        sa_config.filename = os.path.join(self.tmp, 'sa.ini')
        util.sa_ini_for_realtime(sa_config, os.path.abspath(configdir))
        sa_config.write()
        self.extractor = ExtractorWrapper(sa_config.filename)
        self.cache_size = cache_size
        ### One instance per context
        self.ctx_names = set()
        # All context-dependent operations are atomic
        self.ctx_locks = collections.defaultdict(util.FIFOLock)
        # ctx -> list of (source, target, alignment)
        self.ctx_data = {}
        # Grammar extractor is not threadsafe
        self.extractor_lock = util.FIFOLock()
        # ctx -> deque of file
        self.grammar_files = {}
        # ctx -> dict of {sentence: file}
        self.grammar_dict = {}
        self.decoders = {}
    def __enter__(self):
        return self
    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Force shutdown on exception
        self.close(ex_type is not None)
    def close(self, force=False):
        '''Cleanup'''
        if force:
            logger.info('Forced shutdown: stopping immediately')
        # Drop contexts before closing processes unless forced
        if not force:
            for ctx_name in list(self.ctx_names):
                self.drop_ctx(ctx_name, force)
        logger.info('Closing processes')
        self.aligner.close(force)
        self.extractor.close(force)
        if self.norm:
            if not force:
                self.tokenizer_lock.acquire()
                self.detokenizer_lock.acquire()
            self.tokenizer.stdin.close()
            self.tokenizer.wait()
            self.detokenizer.stdin.close()
            self.detokenizer.wait()
            if not force:
                self.tokenizer_lock.release()
                self.detokenizer_lock.release()
        logger.info('Deleting {}'.format(self.tmp))
        shutil.rmtree(self.tmp)
    def lazy_ctx(self, ctx_name):
        '''Initialize a context (inc starting a new decoder) if needed.
        NOT threadsafe, acquire ctx_name lock before calling.'''
        if ctx_name in self.ctx_names:
            return
        logger.info('({}) New context'.format(ctx_name))
        self.ctx_names.add(ctx_name)
        self.ctx_data[ctx_name] = []
        self.grammar_files[ctx_name] = collections.deque()
        self.grammar_dict[ctx_name] = {}
        tmpdir = os.path.join(self.tmp, 'decoder.{}'.format(ctx_name))
        self.decoders[ctx_name] = RealtimeDecoder(self.config, tmpdir, hpyplm=self.hpyplm, metric=self.metric)
    def drop_ctx(self, ctx_name=None, force=False):
        '''Delete a context (inc stopping the decoder)
        Threadsafe and FIFO unless forced.'''
        lock = self.ctx_locks[ctx_name]
        if not force:
            lock.acquire()
        if ctx_name not in self.ctx_names:
            logger.info('({}) No context found, no action taken'.format(ctx_name))
            if not force:
                lock.release()
            return
        logger.info('({}) Dropping context'.format(ctx_name))
        self.ctx_names.remove(ctx_name)
        self.ctx_data.pop(ctx_name)
        self.extractor.drop_ctx(ctx_name)
        self.grammar_files.pop(ctx_name)
        self.grammar_dict.pop(ctx_name)
        self.decoders.pop(ctx_name).close(force)
        self.ctx_locks.pop(ctx_name)
        if not force:
            lock.release()
    def list_ctx(self, ctx_name=None):
        '''Return a string of active contexts'''
        return 'ctx_name ||| {}'.format(' '.join(sorted(str(ctx_name) for ctx_name in self.ctx_names)))
    def grammar(self, sentence, ctx_name=None):
        '''Extract a sentence-level grammar on demand (or return cached)
        Threadsafe wrt extractor but NOT decoder. Acquire ctx_name lock
        before calling.'''
        self.extractor_lock.acquire()
        self.lazy_ctx(ctx_name)
        grammar_dict = self.grammar_dict[ctx_name]
        grammar_file = grammar_dict.get(sentence, None)
        # Cache hit
        if grammar_file:
            logger.info('({}) Grammar cache hit: {}'.format(ctx_name, grammar_file))
            self.extractor_lock.release()
            return grammar_file
        # Extract and cache
        (fid, grammar_file) = tempfile.mkstemp(dir=self.decoders[ctx_name].tmp, prefix='grammar.', suffix='.gz')
        os.close(fid)
        self.extractor.grammar(sentence, grammar_file, ctx_name)
        grammar_files = self.grammar_files[ctx_name]
        # Evict the oldest cached grammar when the LRU-style cache is full
        if len(grammar_files) == self.cache_size:
            rm_sent = grammar_files.popleft()
            # If not already removed by learn method
            if rm_sent in grammar_dict:
                rm_grammar = grammar_dict.pop(rm_sent)
                os.remove(rm_grammar)
        grammar_files.append(sentence)
        grammar_dict[sentence] = grammar_file
        self.extractor_lock.release()
        return grammar_file
    def translate(self, sentence, ctx_name=None):
        '''Decode a sentence (inc extracting a grammar if needed)
        Threadsafe, FIFO'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        # Empty in, empty out
        if sentence.strip() == '':
            lock.release()
            return ''
        if self.norm:
            sentence = self.tokenize(sentence)
            logger.info('({}) Normalized input: {}'.format(ctx_name, sentence))
        grammar_file = self.grammar(sentence, ctx_name)
        decoder = self.decoders[ctx_name]
        start_time = time.time()
        hyp = decoder.decoder.decode(sentence, grammar_file)
        stop_time = time.time()
        logger.info('({}) Translation time: {} seconds'.format(ctx_name, stop_time - start_time))
        # Empty reference: HPYPLM does not learn prior to next translation
        if self.hpyplm:
            decoder.ref_fifo.write('\n')
            decoder.ref_fifo.flush()
        if self.norm:
            logger.info('({}) Normalized translation: {}'.format(ctx_name, hyp))
            hyp = self.detokenize(hyp)
        lock.release()
        return hyp
    def tokenize(self, line):
        '''Normalize one line via the shared tokenizer subprocess (threadsafe, FIFO).'''
        self.tokenizer_lock.acquire()
        self.tokenizer.stdin.write('{}\n'.format(line))
        tok_line = self.tokenizer.stdout.readline().strip()
        self.tokenizer_lock.release()
        return tok_line
    def detokenize(self, line):
        '''Denormalize one line via the shared detokenizer subprocess (threadsafe, FIFO).'''
        self.detokenizer_lock.acquire()
        self.detokenizer.stdin.write('{}\n'.format(line))
        detok_line = self.detokenizer.stdout.readline().strip()
        self.detokenizer_lock.release()
        return detok_line
    def command_line(self, line, ctx_name=None):
        '''Parse and dispatch one "COMMAND [ctx_name] ||| args..." line to the
        matching COMMANDS entry; logs and returns None on a malformed command.'''
        # COMMAND [ctx_name] ||| arg1 [||| arg2 ...]
        args = [f.strip() for f in line.split('|||')]
        if args[-1] == '':
            args = args[:-1]
        if len(args) > 0:
            cmd_name = args[0].split()
            # ctx_name provided
            if len(cmd_name) == 2:
                (cmd_name, ctx_name) = cmd_name
            # ctx_name default/passed
            else:
                cmd_name = cmd_name[0]
            (command, nargs) = self.COMMANDS.get(cmd_name, (None, None))
            if command and len(args[1:]) in nargs:
                logger.info('({}) {} ||| {}'.format(ctx_name, cmd_name, ' ||| '.join(args[1:])))
                return command(*args[1:], ctx_name=ctx_name)
        logger.info('ERROR: command: {}'.format(' ||| '.join(args)))
    def learn(self, source, target, ctx_name=None):
        '''Learn from training instance (inc extracting grammar if needed)
        Threadsafe, FIFO'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        if '' in (source.strip(), target.strip()):
            logger.info('({}) ERROR: empty source or target: {} ||| {}'.format(ctx_name, source, target))
            lock.release()
            return
        if self.norm:
            source = self.tokenize(source)
            target = self.tokenize(target)
        # Align instance
        alignment = self.aligner.align(source, target)
        grammar_file = self.grammar(source, ctx_name)
        # MIRA update before adding data to grammar extractor
        decoder = self.decoders[ctx_name]
        mira_log = decoder.decoder.update(source, grammar_file, target)
        logger.info('({}) MIRA HBF: {}'.format(ctx_name, mira_log))
        # Add to HPYPLM by writing to fifo (read on next translation)
        if self.hpyplm:
            logger.info('({}) Adding to HPYPLM: {}'.format(ctx_name, target))
            decoder.ref_fifo.write('{}\n'.format(target))
            decoder.ref_fifo.flush()
        # Store incremental data for save/load
        self.ctx_data[ctx_name].append((source, target, alignment))
        # Add aligned sentence pair to grammar extractor
        logger.info('({}) Adding to bitext: {} ||| {} ||| {}'.format(ctx_name, source, target, alignment))
        self.extractor.add_instance(source, target, alignment, ctx_name)
        # Clear (old) cached grammar
        rm_grammar = self.grammar_dict[ctx_name].pop(source)
        os.remove(rm_grammar)
        lock.release()
    def save_state(self, file_or_stringio=None, ctx_name=None):
        '''Write state (several lines terminated by EOF line) to file, buffer, or stdout'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        ctx_data = self.ctx_data[ctx_name]
        # Filename, StringIO or None (stdout)
        if file_or_stringio:
            if isinstance(file_or_stringio, StringIO.StringIO):
                out = file_or_stringio
            else:
                out = open(file_or_stringio, 'w')
        else:
            out = sys.stdout
        logger.info('({}) Saving state with {} sentences'.format(ctx_name, len(ctx_data)))
        # Format: first line is decoder weights, then one line per learned
        # instance, then a literal EOF terminator line.
        out.write('{}\n'.format(self.decoders[ctx_name].decoder.get_weights()))
        for (source, target, alignment) in ctx_data:
            out.write('{} ||| {} ||| {}\n'.format(source, target, alignment))
        out.write('EOF\n')
        # Close if file
        if file_or_stringio and not isinstance(file_or_stringio, StringIO.StringIO):
            out.close()
        lock.release()
    def load_state(self, file_or_stringio=None, ctx_name=None):
        '''Load state (several lines terminated by EOF line) from file, buffer, or stdin.
        Restarts context on any error.'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        ctx_data = self.ctx_data[ctx_name]
        decoder = self.decoders[ctx_name]
        # Filename, StringIO, or None (stdin)
        if file_or_stringio:
            if isinstance(file_or_stringio, StringIO.StringIO):
                input = file_or_stringio
            else:
                input = open(file_or_stringio)
        else:
            input = sys.stdin
        # Non-initial load error
        if ctx_data:
            logger.info('({}) ERROR: Incremental data has already been added to context'.format(ctx_name))
            logger.info(' State can only be loaded to a new context.')
            lock.release()
            return
        # Many things can go wrong if bad state data is given
        try:
            # MIRA weights
            line = input.readline().strip()
            # Throws exception if bad line
            decoder.decoder.set_weights(line)
            logger.info('({}) Loading state...'.format(ctx_name))
            start_time = time.time()
            # Lines source ||| target ||| alignment
            while True:
                line = input.readline()
                if not line:
                    raise Exception('End of file before EOF line')
                line = line.strip()
                if line == 'EOF':
                    break
                (source, target, alignment) = line.split(' ||| ')
                ctx_data.append((source, target, alignment))
                # Extractor
                self.extractor.add_instance(source, target, alignment, ctx_name)
                # HPYPLM
                if self.hpyplm:
                    hyp = decoder.decoder.decode(LIKELY_OOV)
                    decoder.ref_fifo.write('{}\n'.format(target))
                    decoder.ref_fifo.flush()
            stop_time = time.time()
            logger.info('({}) Loaded state with {} sentences in {} seconds'.format(ctx_name, len(ctx_data), stop_time - start_time))
            lock.release()
        # Recover from bad load attempt by restarting context.
        # Guaranteed not to cause data loss since only a new context can load state.
        except:
            logger.info('({}) ERROR: could not load state, restarting context'.format(ctx_name))
            # ctx_name is already owned and needs to be restarted before other blocking threads use
            self.drop_ctx(ctx_name, force=True)
            self.lazy_ctx(ctx_name)
            lock.release()
|
{
"content_hash": "ea1a7f458efa29a61bc270e6fb71faec",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 132,
"avg_line_length": 41.51633986928105,
"alnum_prop": 0.580132241813602,
"repo_name": "pks/cdec-dtrain-legacy",
"id": "70ed0c3c67ba5e717723bacb87e8716b6b030f79",
"size": "19079",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "realtime/rt/rt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "100229"
},
{
"name": "C++",
"bytes": "2857299"
},
{
"name": "Groff",
"bytes": "964240"
},
{
"name": "LLVM",
"bytes": "11021"
},
{
"name": "Perl",
"bytes": "204327"
},
{
"name": "Python",
"bytes": "426963"
},
{
"name": "Ruby",
"bytes": "8041"
},
{
"name": "Shell",
"bytes": "3861"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.views.audit_scores
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey.views import AuthenticatedService
from security_monkey.views import AUDIT_SCORE_FIELDS
from security_monkey.views import ACCOUNT_PATTERN_AUDIT_SCORE_FIELDS
from security_monkey.datastore import ItemAuditScore
from security_monkey import db, rbac
from flask.ext.restful import marshal, reqparse
class AuditScoresGet(AuthenticatedService):
    # GET is open to viewers; POST (create) requires Admin.
    decorators = [
        rbac.allow(["View"], ["GET"]),
        rbac.allow(["Admin"], ["POST"])
    ]
    def __init__(self):
        # NOTE(review): relies on AuthenticatedService.__init__ providing
        # self.reqparse (AuditScoreGetPutDelete creates its own) — confirm.
        super(AuditScoresGet, self).__init__()
    def get(self):
        """
        .. http:get:: /api/1/auditscores
            Get a list of override scores for audit items.
            **Example Request**:
            .. sourcecode:: http
                GET /api/1/auditscores HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript
            **Example Response**:
            .. sourcecode:: http
                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json
                {
                    count: 1,
                    items: [
                        {
                            "id": 123,
                            "method": "check_xxx",
                            "technology": "policy",
                            "score": 1
                        },
                    ],
                    total: 1,
                    page: 1,
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }
            :statuscode 200: no error
            :statuscode 401: Authentication failure. Please login.
        """
        # Pagination arguments (query string), with defaults.
        self.reqparse.add_argument(
            'count', type=int, default=30, location='args')
        self.reqparse.add_argument(
            'page', type=int, default=1, location='args')
        args = self.reqparse.parse_args()
        page = args.pop('page', None)
        count = args.pop('count', None)
        # Paginate ordered by technology; error_out=False returns an empty
        # page rather than 404 for an out-of-range page number.
        result = ItemAuditScore.query.order_by(ItemAuditScore.technology).paginate(page, count, error_out=False)
        items = []
        for entry in result.items:
            auditscore_marshaled = marshal(entry.__dict__, AUDIT_SCORE_FIELDS)
            items.append(auditscore_marshaled)
        marshaled_dict = {
            'total': result.total,
            'count': len(items),
            'page': result.page,
            'items': items,
            'auth': self.auth_dict
        }
        return marshaled_dict, 200
    def post(self):
        """
        .. http:post:: /api/1/auditscores
            Create a new override audit score.
            **Example Request**:
            .. sourcecode:: http
                POST /api/1/auditscores HTTP/1.1
                Host: example.com
                Accept: application/json
                {
                    "method": "check_xxx",
                    "technology": "policy",
                    "score": 1
                }
            **Example Response**:
            .. sourcecode:: http
                HTTP/1.1 201 Created
                Vary: Accept
                Content-Type: application/json
                {
                    "id": 123,
                    "name": "Corp",
                    "notes": "Corporate Network",
                    "cidr": "1.2.3.4/22"
                }
            :statuscode 201: created
            :statuscode 401: Authentication Error. Please Login.
        """
        self.reqparse.add_argument('method', required=True, type=unicode, help='Must provide method name',
                                   location='json')
        self.reqparse.add_argument('technology', required=True, type=unicode, help='Technology required.',
                                   location='json')
        self.reqparse.add_argument('score', required=False, type=unicode, help='Override score required',
                                   location='json')
        self.reqparse.add_argument('disabled', required=True, type=unicode, help='Disabled flag',
                                   location='json')
        args = self.reqparse.parse_args()
        method = args['method']
        technology = args['technology']
        score = args['score']
        # Missing score defaults to 0
        if score is None:
            score = 0
        disabled = args['disabled']
        # Upsert: reuse an existing (technology, method) row if present.
        query = ItemAuditScore.query.filter(ItemAuditScore.technology == technology)
        query = query.filter(ItemAuditScore.method == method)
        auditscore = query.first()
        if not auditscore:
            auditscore = ItemAuditScore()
            auditscore.method = method
            auditscore.technology = technology
        auditscore.score = int(score)
        # NOTE(review): 'disabled' is parsed as a string; bool('False') is
        # True, so any non-empty value enables the flag — confirm intended.
        auditscore.disabled = bool(disabled)
        db.session.add(auditscore)
        db.session.commit()
        db.session.refresh(auditscore)
        auditscore_marshaled = marshal(auditscore.__dict__, AUDIT_SCORE_FIELDS)
        auditscore_marshaled['auth'] = self.auth_dict
        return auditscore_marshaled, 201
class AuditScoreGetPutDelete(AuthenticatedService):
    # GET is open to viewers; PUT/DELETE require Admin.
    decorators = [
        rbac.allow(["View"], ["GET"]),
        rbac.allow(["Admin"], ["PUT", "DELETE"])
    ]
    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(AuditScoreGetPutDelete, self).__init__()
    def get(self, id):
        """
        .. http:get:: /api/1/auditscores/<int:id>
            Get the overide audit score with given ID.
            **Example Request**:
            .. sourcecode:: http
                GET /api/1/auditscores/123 HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript
            **Example Response**:
            .. sourcecode:: http
                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json
                {
                    "id": 123,
                    "method": "check_xxx",
                    "technology": "policy",
                    "score": "1",
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }
            :statuscode 200: no error
            :statuscode 404: item with given ID not found
            :statuscode 401: Authentication failure. Please login.
        """
        result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
        if not result:
            return {"status": "Override Audit Score with the given ID not found."}, 404
        auditscore_marshaled = marshal(result.__dict__, AUDIT_SCORE_FIELDS)
        auditscore_marshaled['auth'] = self.auth_dict
        # Attach the per-account-pattern overrides associated with this score.
        account_pattern_scores_marshaled = []
        for account_pattern_score in result.account_pattern_scores:
            account_pattern_score_marshaled = marshal(account_pattern_score, ACCOUNT_PATTERN_AUDIT_SCORE_FIELDS)
            account_pattern_scores_marshaled.append(account_pattern_score_marshaled)
        auditscore_marshaled['account_pattern_scores'] = account_pattern_scores_marshaled
        return auditscore_marshaled, 200
    def put(self, id):
        """
        .. http:get:: /api/1/auditscores/<int:id>
            Update override audit score with the given ID.
            **Example Request**:
            .. sourcecode:: http
                PUT /api/1/auditscores/123 HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript
                {
                    "id": 123,
                    "method": "check_xxx",
                    "technology": "policy",
                    "Score": "1"
                }
            **Example Response**:
            .. sourcecode:: http
                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json
                {
                    "id": 123,
                    "score": "1",
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }
            :statuscode 200: no error
            :statuscode 404: item with given ID not found
            :statuscode 401: Authentication failure. Please login.
        """
        self.reqparse.add_argument('method', required=True, type=unicode, help='Must provide method name',
                                   location='json')
        self.reqparse.add_argument('technology', required=True, type=unicode, help='Technology required.',
                                   location='json')
        self.reqparse.add_argument('score', required=False, type=unicode, help='Must provide score.',
                                   location='json')
        self.reqparse.add_argument('disabled', required=True, type=unicode, help='Must disabled flag.',
                                   location='json')
        args = self.reqparse.parse_args()
        score = args['score']
        # Missing score defaults to 0
        if score is None:
            score = 0
        result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
        if not result:
            return {"status": "Override audit score with the given ID not found."}, 404
        result.method = args['method']
        result.technology = args['technology']
        # NOTE(review): assigns the raw string here, while POST applies
        # bool() — inconsistent; confirm which conversion the model expects.
        result.disabled = args['disabled']
        result.score = int(score)
        db.session.add(result)
        db.session.commit()
        db.session.refresh(result)
        auditscore_marshaled = marshal(result.__dict__, AUDIT_SCORE_FIELDS)
        auditscore_marshaled['auth'] = self.auth_dict
        return auditscore_marshaled, 200
    def delete(self, id):
        """
        .. http:delete:: /api/1/auditscores/123
            Delete an override audit score
            **Example Request**:
            .. sourcecode:: http
                DELETE /api/1/auditscores/123 HTTP/1.1
                Host: example.com
                Accept: application/json
            **Example Response**:
            .. sourcecode:: http
                HTTP/1.1 202 Accepted
                Vary: Accept
                Content-Type: application/json
                {
                    'status': 'deleted'
                }
            :statuscode 202: accepted
            :statuscode 401: Authentication Error. Please Login.
        """
        result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
        # NOTE(review): `result` is None when the id does not exist, and
        # db.session.delete(None) raises — confirm whether a 404 guard
        # (as in get()/put()) is wanted here.
        db.session.delete(result)
        db.session.commit()
        return {'status': 'deleted'}, 202
|
{
"content_hash": "cfebfc86d0fa3e203d6fff4e418cabeb",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 112,
"avg_line_length": 30.76068376068376,
"alnum_prop": 0.5085671945910901,
"repo_name": "stackArmor/security_monkey",
"id": "e12d57434ed7e13891cf44cc17efbc8f3c8affb5",
"size": "11423",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "security_monkey/views/audit_scores.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33462"
},
{
"name": "Dart",
"bytes": "137774"
},
{
"name": "Dockerfile",
"bytes": "3798"
},
{
"name": "HTML",
"bytes": "165572"
},
{
"name": "JavaScript",
"bytes": "984069"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1682110"
},
{
"name": "Shell",
"bytes": "29978"
}
],
"symlink_target": ""
}
|
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf
_, tf, _ = try_import_tf()
class NoopModel(TFModelV2):
    """Pass-through TF model: returns the flattened observation unchanged.
    This is the model used if use_state_preprocessor=False."""

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Cast to float32 but perform no other transformation.
        flat_obs = input_dict["obs_flat"]
        return tf.cast(flat_obs, tf.float32), state
class TorchNoopModel(TorchModelV2):
    """Pass-through Torch model: returns the flattened observation unchanged.
    This is the model used if use_state_preprocessor=False."""

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Cast to float but perform no other transformation.
        flat_obs = input_dict["obs_flat"]
        return flat_obs.float(), state
|
{
"content_hash": "87514c53e5f0f0236ea0891d0b7683f6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 32.2962962962963,
"alnum_prop": 0.7282110091743119,
"repo_name": "pcmoritz/ray-1",
"id": "4dba83b9d4d4954c2b6b4755deabf17b4c23d886",
"size": "872",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rllib/agents/ddpg/noop_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
from test_framework.blocktools import (
COINBASE_MATURITY,
add_witness_commitment,
create_block,
create_coinbase,
send_to_witness,
)
from test_framework.messages import (
MAX_BIP125_RBF_SEQUENCE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet import MiniWallet
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
# Fee rates (sat/vB)
INSUFFICIENT = 1
ECONOMICAL = 50
NORMAL = 100
HIGH = 500
TOO_HIGH = 100000
class BumpFeeTest(BitcoinTestFramework):
    def set_test_params(self):
        """Run two wallet nodes: node0 (peer) gets -walletrbf=0, node1 (rbf) gets -walletrbf=1."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[
            "-walletrbf={}".format(i),
            "-mintxfee=0.00002",
            "-addresstype=bech32",
            "-whitelist=noban@127.0.0.1",
        ] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        """Skip the entire test when the build has no wallet support."""
        self.skip_if_no_wallet()
    def clear_mempool(self):
        """Mine one block on node1 so subtests start from an empty mempool."""
        # Clear mempool between subtests. The subtests may only depend on chainstate (utxos)
        self.generate(self.nodes[1], 1)
    def run_test(self):
        """Fund the rbf node with many small coins, then run every subtest in sequence."""
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis each)
        self.log.info("Mining blocks...")
        self.generate(peer_node, 110)
        for _ in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        self.generate(peer_node, 1)
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        for mode in ["default", "fee_rate"]:
            test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address)
        self.test_invalid_parameters(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
        test_notmine_bumpfee(self, rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
        test_dust_to_fee(self, rbf_node, dest_address)
        test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
        test_rebumping(self, rbf_node, dest_address)
        test_rebumping_not_replaceable(self, rbf_node, dest_address)
        test_bumpfee_already_spent(self, rbf_node, dest_address)
        test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
        test_bumpfee_metadata(self, rbf_node, dest_address)
        test_locked_wallet_fails(self, rbf_node, dest_address)
        test_change_script_match(self, rbf_node, dest_address)
        test_settxfee(self, rbf_node, dest_address)
        test_maxtxfee_fails(self, rbf_node, dest_address)
        # These tests wipe out a number of utxos that are expected in other tests
        test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
        test_no_more_inputs_fails(self, rbf_node, dest_address)
    def test_invalid_parameters(self, rbf_node, peer_node, dest_address):
        """Exercise bumpfee's argument-validation error paths (bad keys, rates, modes)."""
        self.log.info('Test invalid parameters')
        rbfid = spend_one_input(rbf_node, dest_address)
        self.sync_mempools((rbf_node, peer_node))
        assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
        # Legacy option names must be rejected outright.
        for key in ["totalFee", "feeRate"]:
            assert_raises_rpc_error(-3, "Unexpected key {}".format(key), rbf_node.bumpfee, rbfid, {key: NORMAL})
        # Bumping to just above minrelay should fail to increase the total fee enough.
        assert_raises_rpc_error(-8, "Insufficient total fee 0.00000141", rbf_node.bumpfee, rbfid, {"fee_rate": INSUFFICIENT})
        self.log.info("Test invalid fee rate settings")
        assert_raises_rpc_error(-4, "Specified or calculated fee 0.141 is too high (cannot be higher than -maxtxfee 0.10",
                                rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
        # Test fee_rate with zero values.
        msg = "Insufficient total fee 0.00"
        for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
            assert_raises_rpc_error(-8, msg, rbf_node.bumpfee, rbfid, {"fee_rate": zero_value})
        msg = "Invalid amount"
        # Test fee_rate values that don't pass fixed-point parsing checks.
        for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
            assert_raises_rpc_error(-3, msg, rbf_node.bumpfee, rbfid, {"fee_rate": invalid_value})
        # Test fee_rate values that cannot be represented in sat/vB.
        for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
            assert_raises_rpc_error(-3, msg, rbf_node.bumpfee, rbfid, {"fee_rate": invalid_value})
        # Test fee_rate out of range (negative number).
        assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate": -1})
        # Test type error.
        for value in [{"foo": "bar"}, True]:
            assert_raises_rpc_error(-3, "Amount is not a number or string", rbf_node.bumpfee, rbfid, {"fee_rate": value})
        self.log.info("Test explicit fee rate raises RPC error if both fee_rate and conf_target are passed")
        assert_raises_rpc_error(-8, "Cannot specify both conf_target and fee_rate. Please provide either a confirmation "
                                "target in blocks for automatic fee estimation, or an explicit fee rate.",
                                rbf_node.bumpfee, rbfid, {"conf_target": NORMAL, "fee_rate": NORMAL})
        self.log.info("Test explicit fee rate raises RPC error if both fee_rate and estimate_mode are passed")
        assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
                                rbf_node.bumpfee, rbfid, {"estimate_mode": "economical", "fee_rate": NORMAL})
        self.log.info("Test invalid conf_target settings")
        assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set",
                                rbf_node.bumpfee, rbfid, {"confTarget": 123, "conf_target": 456})
        self.log.info("Test invalid estimate_mode settings")
        for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
            assert_raises_rpc_error(-3, f"JSON value of type {k} for field estimate_mode is not of expected type string",
                                    rbf_node.bumpfee, rbfid, {"estimate_mode": v})
        for mode in ["foo", Decimal("3.1415"), "sat/B", "BTC/kB"]:
            assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
                                    rbf_node.bumpfee, rbfid, {"estimate_mode": mode})
        self.clear_mempool()
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
    """Happy path: bumpfee/psbtbumpfee replace an opt-in-RBF tx and update wallet metadata.

    mode selects whether an explicit fee_rate option is passed ("fee_rate")
    or the automatic estimate is used ("default").
    """
    self.log.info('Test simple bumpfee: {}'.format(mode))
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    self.sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    if mode == "fee_rate":
        bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
        bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
    else:
        bumped_psbt = rbf_node.psbtbumpfee(rbfid)
        bumped_tx = rbf_node.bumpfee(rbfid)
    # gettransaction reports fee as a negative amount, hence the negations below.
    assert_equal(bumped_tx["errors"], [])
    assert bumped_tx["fee"] > -rbftx["fee"]
    assert_equal(bumped_tx["origfee"], -rbftx["fee"])
    assert "psbt" not in bumped_tx
    assert_equal(bumped_psbt["errors"], [])
    assert bumped_psbt["fee"] > -rbftx["fee"]
    assert_equal(bumped_psbt["origfee"], -rbftx["fee"])
    assert "psbt" in bumped_psbt
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    self.sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
    self.clear_mempool()
def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
    """bumpfee works on a replaceable tx whose input is a segwit output."""
    self.log.info('Test that segwit-sourcing bumpfee works')
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='bech32'))
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    # Hand-build the spending tx with an opt-in RBF sequence number.
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": MAX_BIP125_RBF_SEQUENCE
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    self.clear_mempool()
def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
    """Verify bumpfee rejects a transaction that did not opt in to BIP 125."""
    self.log.info('Test that we cannot replace a non RBF transaction')
    # peer_node is node0 and runs with -walletrbf=0 (see set_test_params),
    # so a plain send creates a non-replaceable transaction.
    txid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable",
                            peer_node.bumpfee, txid)
    self.clear_mempool()
def test_notmine_bumpfee(self, rbf_node, peer_node, dest_address):
    """bumpfee rejects txs containing foreign inputs, but psbtbumpfee can still bump them."""
    self.log.info('Test that it cannot bump fee if non-owned inputs are included')
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    fee = Decimal("0.001")
    utxos = [node.listunspent(query_options={'minimumAmount': fee})[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": MAX_BIP125_RBF_SEQUENCE
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - fee
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # Each wallet signs only its own input.
    signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
    signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    entry = rbf_node.getmempoolentry(rbfid)
    old_fee = entry["fees"]["base"]
    old_feerate = int(old_fee / entry["vsize"] * Decimal(1e8))
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
    def finish_psbtbumpfee(psbt):
        # Both wallets must process the PSBT; the finalized bump must pay more than the original.
        psbt = rbf_node.walletprocesspsbt(psbt)
        psbt = peer_node.walletprocesspsbt(psbt["psbt"])
        final = rbf_node.finalizepsbt(psbt["psbt"])
        res = rbf_node.testmempoolaccept([final["hex"]])
        assert res[0]["allowed"]
        assert_greater_than(res[0]["fees"]["base"], old_fee)
    self.log.info("Test that psbtbumpfee works for non-owned inputs")
    psbt = rbf_node.psbtbumpfee(txid=rbfid)
    finish_psbtbumpfee(psbt["psbt"])
    psbt = rbf_node.psbtbumpfee(txid=rbfid, options={"fee_rate": old_feerate + 10})
    finish_psbtbumpfee(psbt["psbt"])
    self.clear_mempool()
def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
    """A tx with a descendant — in the wallet or only in the mempool — must not be bumpable."""
    self.log.info('Test that fee cannot be bumped when it has descendant')
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransactionwithwallet(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
    # create tx with descendant in the mempool by using MiniWallet
    miniwallet = MiniWallet(rbf_node)
    parent_id = spend_one_input(rbf_node, miniwallet.get_address())
    tx = rbf_node.gettransaction(txid=parent_id, verbose=True)['decoded']
    miniwallet.scan_tx(tx)
    miniwallet.send_self_transfer(from_node=rbf_node)
    assert_raises_rpc_error(-8, "Transaction has descendants in the mempool", rbf_node.bumpfee, parent_id)
    self.clear_mempool()
def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
    """Repeated bumps eventually consume the change output and must add new inputs."""
    self.log.info('Testing small output with feerate bump succeeds')
    # Make sure additional inputs exist
    self.generatetoaddress(rbf_node, COINBASE_MATURITY + 1, rbf_node.getnewaddress())
    rbfid = spend_one_input(rbf_node, dest_address)
    input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
    assert_equal(len(input_list), 1)
    original_txin = input_list[0]
    self.log.info('Keep bumping until transaction fee out-spends non-destination value')
    tx_fee = 0
    while True:
        # While the change output still covers the fee, each bump keeps the single original input.
        input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
        new_item = list(input_list)[0]
        assert_equal(len(input_list), 1)
        assert_equal(original_txin["txid"], new_item["txid"])
        assert_equal(original_txin["vout"], new_item["vout"])
        rbfid_new_details = rbf_node.bumpfee(rbfid)
        rbfid_new = rbfid_new_details["txid"]
        raw_pool = rbf_node.getrawmempool()
        assert rbfid not in raw_pool
        assert rbfid_new in raw_pool
        rbfid = rbfid_new
        tx_fee = rbfid_new_details["fee"]
        # Total value from input not going to destination
        if tx_fee > Decimal('0.00050000'):
            break
    # input(s) have been added
    final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
    assert_greater_than(len(final_input_list), 1)
    # Original input is in final set
    assert [txin for txin in final_input_list
            if txin["txid"] == original_txin["txid"]
            and txin["vout"] == original_txin["vout"]]
    self.generatetoaddress(rbf_node, 1, rbf_node.getnewaddress())
    assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
    self.clear_mempool()
def test_dust_to_fee(self, rbf_node, dest_address):
    """When the bumped change output would become dust, it is dropped into the fee."""
    self.log.info('Test that bumped output that is dust is dropped to fee')
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # The DER formatting used by Bitcoin to serialize ECDSA signatures means that signatures can have a
    # variable size of 70-72 bytes (or possibly even less), with most being 71 or 72 bytes. The signature
    # in the witness is divided by 4 for the vsize, so this variance can take the weight across a 4-byte
    # boundary. Thus expected transaction size (p2wpkh, 1 input, 2 outputs) is 140-141 vbytes, usually 141.
    if not 140 <= fulltx["vsize"] <= 141:
        raise AssertionError("Invalid tx vsize of {} (140-141 expected), full tx: {}".format(fulltx["vsize"], fulltx))
    # Bump with fee_rate of 350.25 sat/vB vbytes to create dust.
    # Expected fee is 141 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049385 BTC.
    # or occasionally 140 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049035 BTC.
    # Dust should be dropped to the fee, so actual bump fee is 0.00050000 BTC.
    bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": 350.25})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
    assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
    self.clear_mempool()
def test_settxfee(self, rbf_node, dest_address):
    """settxfee bounds are enforced and bumpfee honors the wallet's paytxfee rate."""
    self.log.info('Test settxfee')
    assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
    assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00025000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
    # check that settxfee respects -maxtxfee
    self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
    assert_raises_rpc_error(-8, "txfee cannot be more than wallet max tx fee", rbf_node.settxfee, Decimal('0.00003'))
    # Restart with default args and re-unlock so later subtests can sign.
    self.restart_node(1, self.extra_args[1])
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    self.connect_nodes(1, 0)
    self.clear_mempool()
def test_maxtxfee_fails(self, rbf_node, dest_address):
    """Verify that bumpfee aborts when the bumped fee would exceed -maxtxfee."""
    self.log.info('Test that bumpfee fails when it hits -maxtxfee')
    # size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
    # expected bump fee of 141 vbytes * 0.00200000 BTC / 1000 vbytes = 0.00002820 BTC
    # which exceeds maxtxfee and is expected to raise
    self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    txid = spend_one_input(rbf_node, dest_address)
    expected_msg = "Unable to create transaction. Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
    assert_raises_rpc_error(-4, expected_msg, rbf_node.bumpfee, txid)
    # Restore default args, re-unlock, and reconnect for the remaining subtests.
    self.restart_node(1, self.extra_args[1])
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    self.connect_nodes(1, 0)
    self.clear_mempool()
def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
    """Watch-only wallets must use psbtbumpfee; the PSBT round-trips through a signing wallet."""
    self.log.info('Test that PSBT is returned for bumpfee in watchonly wallets')
    priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
    pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
    priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
    pub_change_desc = rbf_node.getdescriptorinfo(priv_change_desc)["descriptor"]
    # Create a wallet with private keys that can sign PSBTs
    rbf_node.createwallet(wallet_name="signer", disable_private_keys=False, blank=True)
    signer = rbf_node.get_wallet_rpc("signer")
    assert signer.getwalletinfo()['private_keys_enabled']
    reqs = [{
        "desc": priv_rec_desc,
        "timestamp": 0,
        "range": [0,1],
        "internal": False,
        "keypool": False # Keys can only be imported to the keypool when private keys are disabled
    },
    {
        "desc": priv_change_desc,
        "timestamp": 0,
        "range": [0, 0],
        "internal": True,
        "keypool": False
    }]
    if self.options.descriptors:
        result = signer.importdescriptors(reqs)
    else:
        result = signer.importmulti(reqs)
    assert_equal(result, [{'success': True}, {'success': True}])
    # Create another wallet with just the public keys, which creates PSBTs
    rbf_node.createwallet(wallet_name="watcher", disable_private_keys=True, blank=True)
    watcher = rbf_node.get_wallet_rpc("watcher")
    assert not watcher.getwalletinfo()['private_keys_enabled']
    reqs = [{
        "desc": pub_rec_desc,
        "timestamp": 0,
        "range": [0, 10],
        "internal": False,
        "keypool": True,
        "watchonly": True,
        "active": True,
    }, {
        "desc": pub_change_desc,
        "timestamp": 0,
        "range": [0, 10],
        "internal": True,
        "keypool": True,
        "watchonly": True,
        "active": True,
    }]
    if self.options.descriptors:
        result = watcher.importdescriptors(reqs)
    else:
        result = watcher.importmulti(reqs)
    assert_equal(result, [{'success': True}, {'success': True}])
    funding_address1 = watcher.getnewaddress(address_type='bech32')
    funding_address2 = watcher.getnewaddress(address_type='bech32')
    peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
    self.generate(peer_node, 1)
    # Create single-input PSBT for transaction to be bumped
    # Ensure the payment amount + change can be fully funded using one of the 0.001BTC inputs.
    psbt = watcher.walletcreatefundedpsbt([watcher.listunspent()[0]], {dest_address: 0.0005}, 0,
                                          {"fee_rate": 1, "add_inputs": False}, True)['psbt']
    psbt_signed = signer.walletprocesspsbt(psbt=psbt, sign=True, sighashtype="ALL", bip32derivs=True)
    psbt_final = watcher.finalizepsbt(psbt_signed["psbt"])
    original_txid = watcher.sendrawtransaction(psbt_final["hex"])
    assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
    # bumpfee can't be used on watchonly wallets
    assert_raises_rpc_error(-4, "bumpfee is not available with wallets that have private keys disabled. Use psbtbumpfee instead.", watcher.bumpfee, original_txid)
    # Bump fee, obnoxiously high to add additional watchonly input
    bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH})
    assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
    assert "txid" not in bumped_psbt
    assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
    assert not watcher.finalizepsbt(bumped_psbt["psbt"])["complete"]
    # Sign bumped transaction
    bumped_psbt_signed = signer.walletprocesspsbt(psbt=bumped_psbt["psbt"], sign=True, sighashtype="ALL", bip32derivs=True)
    bumped_psbt_final = watcher.finalizepsbt(bumped_psbt_signed["psbt"])
    assert bumped_psbt_final["complete"]
    # Broadcast bumped transaction
    bumped_txid = watcher.sendrawtransaction(bumped_psbt_final["hex"])
    assert bumped_txid in rbf_node.getrawmempool()
    assert original_txid not in rbf_node.getrawmempool()
    rbf_node.unloadwallet("watcher")
    rbf_node.unloadwallet("signer")
    self.clear_mempool()
def test_rebumping(self, rbf_node, dest_address):
    """Bumping an already-replaced tx must fail, while bumping its replacement succeeds."""
    self.log.info('Test that re-bumping the original tx fails, but bumping successor works')
    original_txid = spend_one_input(rbf_node, dest_address)
    first_bump = rbf_node.bumpfee(original_txid, {"fee_rate": ECONOMICAL})
    # The superseded tx can no longer be bumped...
    expected_msg = f"Cannot bump transaction {original_txid} which was already bumped by transaction {first_bump['txid']}"
    assert_raises_rpc_error(-4, expected_msg,
                            rbf_node.bumpfee, original_txid, {"fee_rate": NORMAL})
    # ...but its successor is still replaceable.
    rbf_node.bumpfee(first_bump["txid"], {"fee_rate": NORMAL})
    self.clear_mempool()
def test_rebumping_not_replaceable(self, rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped again."""
    self.log.info('Test that re-bumping non-replaceable fails')
    txid = spend_one_input(rbf_node, dest_address)
    final_bump = rbf_node.bumpfee(txid, {"fee_rate": ECONOMICAL, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable",
                            rbf_node.bumpfee, final_bump["txid"], {"fee_rate": NORMAL})
    self.clear_mempool()
def test_bumpfee_already_spent(self, rbf_node, dest_address):
    """Verify that bumping a tx whose input was already spent (confirmed) fails."""
    self.log.info('Test that bumping tx with already spent coin fails')
    txid = spend_one_input(rbf_node, dest_address)
    self.generate(rbf_node, 1)  # spend coin simply by mining block with tx
    spent_input = rbf_node.gettransaction(txid=txid, verbose=True)['decoded']['vin'][0]
    assert_raises_rpc_error(-1, f"{spent_input['txid']}:{spent_input['vout']} is already spent",
                            rbf_node.bumpfee, txid, {"fee_rate": NORMAL})
    # Consistency fix: every sibling subtest ends with clear_mempool(); this one was
    # the only omission. The extra mined block is harmless here.
    self.clear_mempool()
def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
    """Outputs of both the replaced and the replacing tx are unspendable while unconfirmed."""
    self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then invalidate the block so the rbf tx will be put back in the mempool.
    # This makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    # Can not abandon conflicted tx
    assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
    rbf_node.invalidateblock(block.hash)
    # Call abandon to make sure the wallet doesn't attempt to resubmit
    # the bump tx and hope the wallet does not rebroadcast before we call.
    rbf_node.abandontransaction(bumpid)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    self.generate(rbf_node, 1, sync_fun=self.no_op)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
    self.clear_mempool()
def test_bumpfee_metadata(self, rbf_node, dest_address):
    """Verify the original tx's comment/to metadata is carried over to the replacement."""
    self.log.info('Test that bumped txn metadata persists to new txn record')
    # Idiom fix: `assert` is a statement, not a function — drop the call-style parens.
    assert rbf_node.getbalance() < 49
    # Mine additional blocks so the 49 BTC send below can be funded.
    self.generatetoaddress(rbf_node, 101, rbf_node.getnewaddress())
    rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    # Metadata from the replaced tx must survive on the replacement's wallet record.
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
    self.clear_mempool()
def test_locked_wallet_fails(self, rbf_node, dest_address):
    """bumpfee must refuse to sign while the wallet is locked."""
    self.log.info('Test that locked wallet cannot bump txn')
    txid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, txid)
    # Re-unlock so the remaining subtests can keep signing.
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    self.clear_mempool()
def test_change_script_match(self, rbf_node, dest_address):
    """Replacement transactions reuse the original change address when possible."""
    self.log.info('Test that the same change addresses is used for the replacement transaction when possible')
    def get_change_address(tx):
        # Return the tx's output addresses the wallet marks as change (ischange).
        tx_details = rbf_node.getrawtransaction(tx, 1)
        txout_addresses = [txout['scriptPubKey']['address'] for txout in tx_details["vout"]]
        return [address for address in txout_addresses if rbf_node.getaddressinfo(address)["ischange"]]
    # Check that there is only one change output
    rbfid = spend_one_input(rbf_node, dest_address)
    change_addresses = get_change_address(rbfid)
    assert_equal(len(change_addresses), 1)
    # Now find that address in each subsequent tx, and no other change
    bumped_total_tx = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
    assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
    bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
    assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
    self.clear_mempool()
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
    """Create, sign and broadcast an opt-in-RBF tx spending one 0.001 BTC wallet coin.

    Sends 0.0005 BTC to dest_address plus a change output of change_size
    (the change output is omitted when change_size is not positive).
    Returns the txid of the broadcast transaction.
    """
    coin = next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000"))
    # Merge the utxo fields with an opt-in RBF sequence number.
    tx_input = dict(coin, sequence=MAX_BIP125_RBF_SEQUENCE)
    outputs = {dest_address: Decimal("0.00050000")}
    if change_size > 0:
        outputs[node.getrawchangeaddress()] = change_size
    raw = node.createrawtransaction([tx_input], outputs)
    signed = node.signrawtransactionwithwallet(raw)
    return node.sendrawtransaction(signed["hex"])
def submit_block_with_tx(node, tx):
    """Mine and submit a block containing the given raw tx (plus coinbase).

    Returns the CBlock object so callers can later invalidate it by hash.
    """
    tip_hash = node.getbestblockhash()
    next_height = node.getblockcount() + 1
    # One second past the median time keeps the new header's timestamp valid.
    ntime = node.getblockheader(tip_hash)["mediantime"] + 1
    block = create_block(int(tip_hash, 16), create_coinbase(next_height), ntime, txlist=[tx])
    add_witness_commitment(block)
    block.solve()
    node.submitblock(block.serialize().hex())
    return block
def test_no_more_inputs_fails(self, rbf_node, dest_address):
    """A feerate bump must fail when the wallet has no confirmed coins left to add."""
    self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
    # feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
    self.generatetoaddress(rbf_node, 1, dest_address)
    # sendall sweeps the whole balance with no change output, so nothing remains
    # to fund a higher fee.
    sweep_txid = rbf_node.sendall(recipients=[rbf_node.getnewaddress()])['txid']
    assert_raises_rpc_error(-4, "Unable to create transaction. Insufficient funds",
                            rbf_node.bumpfee, sweep_txid)
    self.clear_mempool()
# Entry point when the file is executed directly.
if __name__ == "__main__":
    BumpFeeTest().main()
|
{
"content_hash": "9d3a3192834f2adefe92c401ceb54f4d",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 175,
"avg_line_length": 48.7953125,
"alnum_prop": 0.6813218482820456,
"repo_name": "kallewoof/bitcoin",
"id": "2ee3e00a7b5ceae7f86b50023bae3473a55b8f45",
"size": "31443",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/functional/wallet_bumpfee.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "C",
"bytes": "1305140"
},
{
"name": "C++",
"bytes": "10322251"
},
{
"name": "CMake",
"bytes": "29182"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "220899"
},
{
"name": "Makefile",
"bytes": "147441"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "3015520"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "58534"
},
{
"name": "Scheme",
"bytes": "26038"
},
{
"name": "Shell",
"bytes": "168334"
}
],
"symlink_target": ""
}
|
import hmac
import time

import config
import util.cryptography

from linkr import db
class Link(db.Model):
    """
    Model representing a link entry: an alias mapped to an outgoing URL,
    optionally owned by a user, password-protected, and/or ReCAPTCHA-gated.
    """
    __tablename__ = 'link'
    # Surrogate primary key.
    link_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Owning user's ID, or None for anonymously created links.
    user_id = db.Column(db.Integer, index=True, default=None)
    # Unix timestamp at which the link was created.
    submit_time = db.Column(db.Integer)
    # Unique, indexed short alias used to look the link up.
    alias = db.Column(db.String(32), index=True, unique=True)
    # Fully qualified destination URL.
    outgoing_url = db.Column(db.Text)
    # Hash of the link password, or None when the link is not password protected.
    password_hash = db.Column(db.Text, default=None)
    # Whether accessing the link requires passing a ReCAPTCHA check.
    require_recaptcha = db.Column(db.Boolean, default=False)
    def __init__(
        self,
        alias,
        outgoing_url,
        password=None,
        user_id=None,
        require_recaptcha=False,
    ):
        """
        Create a new link entry.
        :param alias: The alias for the link.
        :param outgoing_url: The fully qualified outgoing URL for the link.
        :param password: An optional password to associate with the link.
        :param user_id: An optional user ID to associate with the link.
        :param require_recaptcha: Whether accessing the link requires a ReCAPTCHA check.
        """
        self.submit_time = int(time.time())
        self.alias = alias
        self.outgoing_url = outgoing_url
        self.password_hash = util.cryptography.secure_hash(password) if password else None
        self.user_id = user_id
        self.require_recaptcha = require_recaptcha
    def edit(self, alias=None, outgoing_url=None):
        """
        Edit this link's alias or outgoing URL. All input fields are optional; if an input has a
        falsey value, its value will not be changed.
        :param alias: The new alias.
        :param outgoing_url: The new outgoing URL.
        """
        if alias:
            self.alias = alias
        if outgoing_url:
            self.outgoing_url = outgoing_url
    def update_password(self, password):
        """
        Update the password for this link. To remove this link's password, the password must be
        explicitly passed in as a falsey value.
        :param password: The new plain-text password, or None.
        """
        self.password_hash = util.cryptography.secure_hash(password) if password else None
    def validate_password(self, password):
        """
        Validate that the supplied link password is correct for this link. This method will
        consider any password to be "correct" if there is no password associated with this link.
        :param password: The supplied password.
        :return: True if the supplied password matches the link's password or if the link is not
                 password protected; False otherwise.
        """
        if not self.is_password_protected():
            return True
        # Compare hashes in constant time to avoid leaking prefix-match timing information.
        # NOTE(review): assumes secure_hash returns an ASCII digest string matching the stored
        # hash's type — hmac.compare_digest requires same-typed ASCII str/bytes; confirm.
        return hmac.compare_digest(util.cryptography.secure_hash(password), self.password_hash)
    def as_dict(self):
        """
        Represent this link entry as a API-friendly, JSON-formatted dictionary.
        :return: A representation of this link's data as a dictionary.
        """
        return {
            'link_id': self.link_id,
            'user_id': self.user_id,
            'submit_time': self.submit_time,
            'alias': self.alias,
            'full_alias': '{base}/{alias}'.format(
                base=config.options.server('linkr_url'),
                alias=self.alias,
            ),
            'outgoing_url': self.outgoing_url,
            'is_password_protected': self.is_password_protected(),
            'require_recaptcha': self.require_recaptcha,
        }
    def is_password_protected(self):
        """
        Return whether this link is password protected.
        :return: True if the link is password protected; False otherwise.
        """
        return bool(self.password_hash)
|
{
"content_hash": "dc6b3e0befc7dbc2be5976dac61c3bb1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 96,
"avg_line_length": 34.509433962264154,
"alnum_prop": 0.6134499726626572,
"repo_name": "LINKIWI/linkr",
"id": "c4d340a7324117356731629ded8d84b075276001",
"size": "3658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15753"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "Groovy",
"bytes": "1908"
},
{
"name": "HTML",
"bytes": "1043"
},
{
"name": "JavaScript",
"bytes": "230542"
},
{
"name": "Python",
"bytes": "191755"
}
],
"symlink_target": ""
}
|
"""distutils.command
Package containing implementation of all the standard Distutils
commands."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: __init__.py 71272 2009-04-05 21:21:05Z georg.brandl $"
__all__ = ['build',
'build_py',
'build_ext',
'build_clib',
'build_scripts',
'clean',
'install',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
'sdist',
'register',
'bdist',
'bdist_dumb',
'bdist_rpm',
'bdist_wininst',
'upload',
# These two are reserved for future use:
#'bdist_sdux',
#'bdist_pkgtool',
# Note:
# bdist_packager is not included because it only provides
# an abstract base class
]
|
{
"content_hash": "68c5569f583fd4da4a9cab6b5bdaf6c6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.4747368421052632,
"repo_name": "babyliynfg/cross",
"id": "def882e6ec07f0fa05d6a3420624828cdc71275c",
"size": "950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/project-creator/Python2.6.6/Lib/distutils/command/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36722"
},
{
"name": "C",
"bytes": "6345646"
},
{
"name": "C++",
"bytes": "15980000"
},
{
"name": "CMake",
"bytes": "1238"
},
{
"name": "GLSL",
"bytes": "64406"
},
{
"name": "HTML",
"bytes": "147661"
},
{
"name": "Java",
"bytes": "574078"
},
{
"name": "JavaScript",
"bytes": "503327"
},
{
"name": "Makefile",
"bytes": "18778"
},
{
"name": "Objective-C",
"bytes": "396703"
},
{
"name": "Objective-C++",
"bytes": "378740"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "15265548"
},
{
"name": "Roff",
"bytes": "23"
},
{
"name": "Shell",
"bytes": "61021"
},
{
"name": "Visual Basic",
"bytes": "19200"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.http import JsonResponse
from datetime import (datetime, timedelta)
from business.models import BusinessModel
from offer.models import OfferModel
from offer.forms import (OfferRegForm, OfferUpdateForm)
from base.apputil import (App_Render, App_LoginRequired, App_PostRequired, App_Redirect)
# Create your views here.
def offer_home_view(request):
    """Render the offers landing page listing every available offer."""
    context = {'title': 'Offers', 'offers_list': OfferModel.fetch_all()}
    return App_Render(request, 'offer/offer_home_1.html', context)
def new1_view(request):
    """Alias route: delegates to the offer creation form."""
    return offer_form_view(request)
def online_view(request):
    """Alias route: currently shows the generic offers page."""
    return offer_home_view(request)
def nearby_view(request):
    """Alias route: currently shows the generic offers page."""
    return offer_home_view(request)
def bulk_view(request):
    """Alias route: currently shows the generic offers page."""
    return offer_home_view(request)
def food_view(request):
    """Alias route: currently shows the generic offers page."""
    return offer_home_view(request)
def offer_detail_view(request, slug):
    """Render the detail page for the offer identified by *slug*.

    :param slug: URL slug identifying the offer.
    """
    # Removed debug print of the slug; it polluted server logs.
    offer = OfferModel.fetch_by_slug(slug)
    data = {'title': 'View Offers', 'offer': offer}
    return App_Render(request, 'offer/offer_item_detail_1.html', data)
@App_LoginRequired
def offer_form_view(request):
    """Render the offer creation form.

    Restores validation errors/values stashed in the session by a failed
    submission; otherwise pre-fills a default start/expiry date range
    (today .. today + 15 days).
    """
    # Removed debug prints of the request and business list; they leaked
    # request data to stdout.
    businesses = BusinessModel.fetch_by_user(request.user)
    data = {'title': 'Create Offer', 'businesses': businesses}
    if 'form_errors' in request.session:
        data['form_errors'] = request.session['form_errors']
        data['form_values'] = request.session['form_values']
        del request.session['form_errors']
        del request.session['form_values']
    else:
        today = datetime.now().date()
        start = today.strftime("%Y/%m/%d")
        end = (today + timedelta(days=15)).strftime("%Y/%m/%d")
        data['form_values'] = {'OF_start_date': start, 'OF_expire_date': end}
    return App_Render(request, 'offer/offer_create_form_1.html', data)
@App_LoginRequired
@App_PostRequired
def offer_create(request):
    """Create a new offer from the submitted form data.

    AJAX requests get a JSON status response; non-AJAX requests stash
    validation errors in the session (on failure) and redirect back to
    the creation form.
    """
    # Removed debug print of request.POST.
    data = None
    form = OfferRegForm()
    if (form.parse(request)
        and form.clean()
        and form.validate()):
        data = form.commit()
    if request.is_ajax():
        if data is not None:
            return JsonResponse({'status': 204, 'message': 'Offer created successfully'})
        else:
            return JsonResponse({'status': 401, 'message': 'Failed to create', 'data': form.errors()})
    else:
        # BUG FIX: errors were previously stored when commit SUCCEEDED
        # (`data is not None`); they must be stored on failure so the
        # redirected form can re-display them.
        if data is None:
            request.session['form_errors'] = form.errors()
            request.session['form_values'] = form.values()
        return App_Redirect(settings.OFFER_CREATE_NEW)
@App_LoginRequired
@App_PostRequired
def offer_update(request):
    """Update an existing offer from the submitted form data.

    AJAX requests get a JSON status response; non-AJAX requests stash
    validation errors in the session (on failure) and redirect back to
    the form.
    """
    # Removed debug print of request.POST.
    data = None
    form = OfferUpdateForm()
    if (form.parse(request)
        and form.clean()
        and form.validate()):
        data = form.commit()
    if request.is_ajax():
        if data is not None:
            return JsonResponse({'status': 204, 'message': 'Offer updated successfully'})
        else:
            return JsonResponse({'status': 401, 'message': 'Failed to update', 'data': form.errors()})
    else:
        # BUG FIX: session errors must be stored on failure (`data is None`),
        # not on success as before.
        if data is None:
            request.session['form_errors'] = form.errors()
            request.session['form_values'] = form.values()
        return App_Redirect(settings.OFFER_CREATE_NEW)
@App_LoginRequired
@App_PostRequired
def offer_patch(request):
    """Partially update an offer from the submitted form data.

    NOTE(review): this is currently identical to ``offer_update`` (same
    form, same messages) -- confirm whether a distinct patch form was
    intended.
    """
    # Removed debug print of request.POST.
    data = None
    form = OfferUpdateForm()
    if (form.parse(request)
        and form.clean()
        and form.validate()):
        data = form.commit()
    if request.is_ajax():
        if data is not None:
            return JsonResponse({'status': 204, 'message': 'Offer updated successfully'})
        else:
            return JsonResponse({'status': 401, 'message': 'Failed to update', 'data': form.errors()})
    else:
        # BUG FIX: session errors must be stored on failure (`data is None`),
        # not on success as before.
        if data is None:
            request.session['form_errors'] = form.errors()
            request.session['form_values'] = form.values()
        return App_Redirect(settings.OFFER_CREATE_NEW)
@App_LoginRequired
@App_PostRequired
def offer_delete(request):
    """Delete an offer identified by the submitted form data.

    NOTE(review): this reuses ``OfferUpdateForm`` and the same
    commit flow as the update views -- confirm that ``commit()``
    actually deletes in this context.
    """
    # Removed debug print of request.POST.
    data = None
    form = OfferUpdateForm()
    if (form.parse(request)
        and form.clean()
        and form.validate()):
        data = form.commit()
    if request.is_ajax():
        if data is not None:
            return JsonResponse({'status': 204, 'message': 'Offer deleted successfully'})
        else:
            return JsonResponse({'status': 401, 'message': 'Failed to delete', 'data': form.errors()})
    else:
        # BUG FIX: session errors must be stored on failure (`data is None`),
        # not on success as before.
        if data is None:
            request.session['form_errors'] = form.errors()
            request.session['form_values'] = form.values()
        return App_Redirect(settings.OFFER_CREATE_NEW)
|
{
"content_hash": "95d2bea278e665d0dfc63e2ed656b7ff",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 102,
"avg_line_length": 30.391025641025642,
"alnum_prop": 0.6344653026787598,
"repo_name": "amitdhiman000/MyOffers",
"id": "610e41ba2bb161a6e2cc19820d94ae0fe87419a6",
"size": "4741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "offer/views.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26343"
},
{
"name": "HTML",
"bytes": "177970"
},
{
"name": "JavaScript",
"bytes": "943727"
},
{
"name": "Python",
"bytes": "196685"
},
{
"name": "Shell",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "50608"
}
],
"symlink_target": ""
}
|
from rest_framework.serializers import (
HyperlinkedIdentityField,
ModelSerializer,
SerializerMethodField
)
from rest_framework import serializers
from comments.api.serializers import CommentSerializer
from posts.models import Post
from comments.models import Comment
# Shared hyperlink field pointing at the post detail endpoint; posts are
# looked up by slug.
post_detail_url = HyperlinkedIdentityField(view_name='posts-api:detail', lookup_field='slug')
class PostListSerializer(serializers.ModelSerializer):
    """Compact Post representation for list endpoints, with a detail URL."""
    # created = serializers.DateTimeField()
    url = post_detail_url
    class Meta:
        model = Post
        fields = [
            'url',
            'title',
            'content',
            'updated',
            'timestamp'
        ]
class PostCreateUpdateSerializer(serializers.ModelSerializer):
    """Serializer used for creating and updating Post instances."""
    class Meta:
        model = Post
        fields = [
            'title',
            'content',
            'publish',
            'image',
        ]

    def validate_content(self, data):
        """Reject content shorter than 4 characters.

        :param data: The submitted content string.
        :raises serializers.ValidationError: if the content is too short.
        :return: The validated content.
        """
        # Removed debug prints that leaked submitted payloads to stdout.
        if len(data) < 4:
            raise serializers.ValidationError("Content is too short. Should be more than 4 characters")
        return data
class PostDetailSerializer(serializers.ModelSerializer):
    """Full Post representation for detail endpoints: adds comments and image."""
    comments = SerializerMethodField()
    image = SerializerMethodField()
    url = post_detail_url

    class Meta:
        model = Post
        fields = [
            'url',
            'title',
            'content',
            'updated',
            'timestamp',
            'comments',
            'image',
        ]

    def get_comments(self, obj):
        """Serialize all comments attached to this post."""
        c_qs = Comment.objects.filter_by_instance(obj)
        comments = CommentSerializer(c_qs, many=True).data
        return comments

    def get_image(self, obj):
        """Return the post image URL, or None when no image is set.

        FIX: the original bare ``except:`` also swallowed SystemExit and
        KeyboardInterrupt; narrowed to ``Exception`` (a missing file raises
        ValueError per Django's FileField.url).
        """
        try:
            image = obj.image.url
        except Exception:
            image = None
        return image
|
{
"content_hash": "eb256962fde5ef3224481ce415d3602c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 94,
"avg_line_length": 20.43243243243243,
"alnum_prop": 0.7129629629629629,
"repo_name": "aminhp93/learning_python",
"id": "d2350729dcfc63300a153ff318cd2b4f69147314",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/posts/api/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "181187"
},
{
"name": "HTML",
"bytes": "24685"
},
{
"name": "JavaScript",
"bytes": "2208340"
},
{
"name": "Python",
"bytes": "57619"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
#from rest_framework import routers
from Inventory_Management import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
#router = routers.DefaultRouter()
#router.register(r'Equip', views.UserViewSet)
#router.register(r'Computer', views.ComputerViewSet)
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'inventory.views.home', name='home'),
    # url(r'^inventory/', include('inventory.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    #url(r'^', include(router.urls)),
    #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    #url(r'^login/$', views.login),
    #url(r'^logout/$', views.logout),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^Inventory_Management/', include('Inventory_Management.urls')),
    # NOTE(review): catch-all pattern -- every URL not matched above is routed
    # to the admin site; this entry must stay last.
    url(r'.*', include(admin.site.urls)),
    #url(r'^Equip/$', views.UserViewSet.as_view()),
    #url(r'^Computer/$', views.ComputerViewSet.as_view()),
)
#url(r'^events/$', views.EventList.as_view()),
{
"content_hash": "62eace0b9ccc433a57f2efc7d980628d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 38.71875,
"alnum_prop": 0.6844229217110573,
"repo_name": "faxandu/Inventory-Management",
"id": "d663924e464da8863d0c1ea10e9f4c7c20abfe5e",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/inventory/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50925"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils.crypto import get_random_string
from django.urls import reverse
from .forms import StatusForm
def create_user(request):
    """Creates a user with random username and password and logs in the user."""
    if request.method != 'POST':
        return render(request, 'create_user.html')
    username = get_random_string(16)
    password = get_random_string(16)
    User.objects.create_user(username=username, password=password)
    user = authenticate(request, username=username, password=password)
    login(request, user)
    return HttpResponseRedirect(reverse('chirp:create_status'))
@login_required
def create_status(request):
    """Render the status form on GET; save the status and trim history on POST."""
    if request.method == 'POST':
        form = StatusForm(request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep only the 10 most recent statuses per user.
            while request.user.status_set.count() > 10:
                request.user.status_set.order_by('created_at').first().delete()
            target = reverse('chirp:profile',
                             kwargs={'username': request.user.username})
            return HttpResponseRedirect(target)
    else:
        form = StatusForm(user=request.user)
    return render(request, 'create_status.html', {'form': form})
def profile(request, username):
    """Show the status list for the given user; 404 when unknown."""
    owner = get_object_or_404(User, username=username)
    return render(request, 'status_list.html', {'user': owner})
def logout_view(request):
    """Log out on POST (or when already anonymous); otherwise ask to confirm."""
    if request.method != 'POST' and request.user.is_authenticated():
        return render(request, 'logout.html')
    logout(request)
    return HttpResponseRedirect(reverse('chirp:create_user'))
|
{
"content_hash": "5006a87822f86d87a1ca999219d4559d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 38.627450980392155,
"alnum_prop": 0.682233502538071,
"repo_name": "consideratecode/csrf_example",
"id": "325ef6a0c2f8b6b9ff9524bc072e73581d73890e",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chirp/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4525"
},
{
"name": "Python",
"bytes": "10678"
}
],
"symlink_target": ""
}
|
"""Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1):
    """Compare two files.

    Arguments:
    f1 -- First file name
    f2 -- Second file name
    shallow -- Just check stat signature (do not read the files).
               defaults to 1.

    Return value:
    True if the files are the same, False otherwise.

    This function uses a cache for past comparisons and the results,
    with a cache invalidation mechanism relying on stale signatures.
    """
    sig1 = _sig(os.stat(f1))
    sig2 = _sig(os.stat(f2))
    # Only regular files can ever compare equal.
    if sig1[0] != stat.S_IFREG or sig2[0] != stat.S_IFREG:
        return False
    if shallow and sig1 == sig2:
        return True
    # Differing sizes imply differing contents.
    if sig1[1] != sig2[1]:
        return False
    cached = _cache.get((f1, f2))
    if cached and (sig1, sig2) == cached[:2]:
        return cached[2]
    equal = _do_cmp(f1, f2)
    _cache[f1, f2] = sig1, sig2, equal
    return equal
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
    """Byte-for-byte comparison of f1 and f2; True when identical."""
    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
        while True:
            chunk1 = fp1.read(BUFSIZE)
            chunk2 = fp2.read(BUFSIZE)
            if chunk1 != chunk2:
                return False
            # Both streams exhausted simultaneously: files match.
            if not chunk1:
                return True
# Directory comparison class.
#
class dircmp:
    """A class that manages the comparison of 2 directories.
    dircmp(a,b,ignore=None,hide=None)
    A and B are directories.
    IGNORE is a list of names to ignore,
    defaults to ['RCS', 'CVS', 'tags'].
    HIDE is a list of names to hide,
    defaults to [os.curdir, os.pardir].
    High level usage:
    x = dircmp(dir1, dir2)
    x.report() -> prints a report on the differences between dir1 and dir2
    or
    x.report_partial_closure() -> prints report on differences between dir1
    and dir2, and reports on common immediate subdirectories.
    x.report_full_closure() -> like report_partial_closure,
    but fully recursive.
    Attributes:
    left_list, right_list: The files in dir1 and dir2,
    filtered by hide and ignore.
    common: a list of names in both dir1 and dir2.
    left_only, right_only: names only in dir1, dir2.
    common_dirs: subdirectories in both dir1 and dir2.
    common_files: files in both dir1 and dir2.
    common_funny: names in both dir1 and dir2 where the type differs between
    dir1 and dir2, or the name is not stat-able.
    same_files: list of identical files.
    diff_files: list of filenames which differ.
    funny_files: list of files which could not be compared.
    subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
    """
    def __init__(self, a, b, ignore=None, hide=None): # Initialize
        self.left = a
        self.right = b
        if hide is None:
            self.hide = [os.curdir, os.pardir] # Names never to be shown
        else:
            self.hide = hide
        if ignore is None:
            self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
        else:
            self.ignore = ignore
    def phase0(self): # Compare everything except common subdirectories
        self.left_list = _filter(os.listdir(self.left),
                                 self.hide+self.ignore)
        self.right_list = _filter(os.listdir(self.right),
                                  self.hide+self.ignore)
        self.left_list.sort()
        self.right_list.sort()
    def phase1(self): # Compute common names
        # Keys are normcase'd so matching is case-insensitive on platforms
        # where the filesystem is; values preserve the original spelling.
        a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
        b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
        self.common = map(a.__getitem__, ifilter(b.__contains__, a))
        self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
        self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
    def phase2(self): # Distinguish files, directories, funnies
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []
        for x in self.common:
            a_path = os.path.join(self.left, x)
            b_path = os.path.join(self.right, x)
            # ok stays 1 only if both sides could be stat'ed.
            ok = 1
            try:
                a_stat = os.stat(a_path)
            except os.error, why:
                # print 'Can\'t stat', a_path, ':', why[1]
                ok = 0
            try:
                b_stat = os.stat(b_path)
            except os.error, why:
                # print 'Can\'t stat', b_path, ':', why[1]
                ok = 0
            if ok:
                a_type = stat.S_IFMT(a_stat.st_mode)
                b_type = stat.S_IFMT(b_stat.st_mode)
                if a_type != b_type:
                    self.common_funny.append(x)
                elif stat.S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif stat.S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    # Neither dir nor regular file (device, socket, ...).
                    self.common_funny.append(x)
            else:
                self.common_funny.append(x)
    def phase3(self): # Find out differences between common files
        xx = cmpfiles(self.left, self.right, self.common_files)
        self.same_files, self.diff_files, self.funny_files = xx
    def phase4(self): # Find out differences between common subdirectories
        # A new dircmp object is created for each common subdirectory,
        # these are stored in a dictionary indexed by filename.
        # The hide and ignore properties are inherited from the parent
        self.subdirs = {}
        for x in self.common_dirs:
            a_x = os.path.join(self.left, x)
            b_x = os.path.join(self.right, x)
            self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
    def phase4_closure(self): # Recursively call phase4() on subdirectories
        self.phase4()
        for sd in self.subdirs.itervalues():
            sd.phase4_closure()
    def report(self): # Print a report on the differences between a and b
        # Output format is purposely lousy
        print 'diff', self.left, self.right
        if self.left_only:
            self.left_only.sort()
            print 'Only in', self.left, ':', self.left_only
        if self.right_only:
            self.right_only.sort()
            print 'Only in', self.right, ':', self.right_only
        if self.same_files:
            self.same_files.sort()
            print 'Identical files :', self.same_files
        if self.diff_files:
            self.diff_files.sort()
            print 'Differing files :', self.diff_files
        if self.funny_files:
            self.funny_files.sort()
            print 'Trouble with common files :', self.funny_files
        if self.common_dirs:
            self.common_dirs.sort()
            print 'Common subdirectories :', self.common_dirs
        if self.common_funny:
            self.common_funny.sort()
            print 'Common funny cases :', self.common_funny
    def report_partial_closure(self): # Print reports on self and on subdirs
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report()
    def report_full_closure(self): # Report on self and subdirs recursively
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report_full_closure()
    # Maps each lazily computed attribute to the phase method that fills it
    # in; consulted by __getattr__ below.
    methodmap = dict(subdirs=phase4,
                     same_files=phase3, diff_files=phase3, funny_files=phase3,
                     common_dirs = phase2, common_files=phase2, common_funny=phase2,
                     common=phase1, left_only=phase1, right_only=phase1,
                     left_list=phase0, right_list=phase0)
    def __getattr__(self, attr):
        # Lazy evaluation: run the phase that computes the requested
        # attribute, then return it.  Phases may depend on attributes of
        # earlier phases, triggering them recursively through this hook.
        if attr not in self.methodmap:
            raise AttributeError, attr
        self.methodmap[attr](self)
        return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
    """Compare common files in two directories.

    a, b -- directory names
    common -- list of file names found in both directories
    shallow -- if true, do comparison based solely on stat() information

    Returns a tuple of three lists:
    files that compare equal
    files that are different
    filenames that aren't regular files.
    """
    res = ([], [], [])
    for name in common:
        # _cmp returns 0 (equal), 1 (different) or 2 (funny), which
        # doubles as the index into the result tuple.
        outcome = _cmp(os.path.join(a, name), os.path.join(b, name), shallow)
        res[outcome].append(name)
    return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
# Returns 0 for equal, 1 for different, 2 for funny cases (can't stat, ...).
# abs and cmp are bound as default arguments at definition time -- a classic
# local-binding idiom that also pins `cmp` to this module's cmp() function.
def _cmp(a, b, sh, abs=abs, cmp=cmp):
    try:
        return not abs(cmp(a, b, sh))
    except os.error:
        return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
    """Command-line driver: compare two directories, -r for full recursion."""
    import sys
    import getopt
    options, args = getopt.getopt(sys.argv[1:], 'r')
    if len(args) != 2:
        raise getopt.GetoptError('need exactly two args', None)
    comparison = dircmp(args[0], args[1])
    if ('-r', '') in options:
        comparison.report_full_closure()
    else:
        comparison.report()
if __name__ == '__main__':
    demo()
|
{
"content_hash": "4be5223d8915938a6233b8f2e381e23a",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 84,
"avg_line_length": 33.2135593220339,
"alnum_prop": 0.5559297815880792,
"repo_name": "emilk/sproxel",
"id": "8f677c5fd6e0252df6612e99ff3e22b2704601cf",
"size": "9798",
"binary": false,
"copies": "50",
"ref": "refs/heads/master",
"path": "distro/common/lib/filecmp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "320380"
},
{
"name": "C++",
"bytes": "361242"
},
{
"name": "Objective-C",
"bytes": "86382"
},
{
"name": "Prolog",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "8233001"
}
],
"symlink_target": ""
}
|
"""
Lightweight asynchronous framework.
This module defines the protocol used for asynchronous operations in udiskie.
It is based on ideas from "Twisted" and the "yield from" expression in
python3, but more lightweight (incomplete) and compatible with python2.
"""
import traceback
from functools import partial
from subprocess import CalledProcessError
from gi.repository import GLib
from gi.repository import Gio
from .common import cachedproperty, wraps
# Public API of this module.
__all__ = [
    'pack',
    'to_coro',
    'run_bg',
    'Future',
    'gather',
    'Task',
]
# Futures currently being awaited (see Future.__await__), presumably to hold
# a strong reference while they are in flight -- TODO confirm.
ACTIVE_TASKS = set()
def pack(*values):
    """Collapse a result tuple into a single yield-expression value.

    Mirrors gi.overrides.Gio.DBusProxy: no values -> None, exactly one
    value -> that value, several values -> the tuple itself.
    """
    if not values:
        return None
    if len(values) == 1:
        return values[0]
    return values
class Future:
    """
    Base class for asynchronous operations.
    One `Future' object represents an asynchronous operation. It allows for
    separate result and error handlers which can be set by appending to the
    `callbacks` and `errbacks` lists.
    Implementations must conform to the following very lightweight protocol:
    The task is started on initialization, but must not finish immediately.
    Success/error exit is signaled to the observer by calling exactly one of
    `self.set_result(value)` or `self.set_exception(exception)` when the
    operation finishes.
    For implementations, see :class:`Task` or :class:`Dialog`.
    """
    @cachedproperty
    def callbacks(self):
        """Functions to be called on successful completion."""
        return []
    @cachedproperty
    def errbacks(self):
        """Functions to be called on error completion."""
        return []
    def _finish(self, callbacks, *args):
        """Set finished state and invoke specified callbacks [internal]."""
        # The list of return values doubles as a "was anything invoked"
        # flag for set_exception below.
        return [fn(*args) for fn in callbacks]
    def set_result(self, value):
        """Signal successful completion."""
        self._finish(self.callbacks, value)
    def set_exception(self, exception):
        """Signal unsuccessful completion."""
        was_handled = self._finish(self.errbacks, exception)
        if not was_handled:
            # No errback consumed the error: at least make it visible.
            traceback.print_exception(
                type(exception), exception, exception.__traceback__)
    def __await__(self):
        # Track in the module-global set while awaited; removed again in
        # the finally clause once the result has been delivered.
        ACTIVE_TASKS.add(self)
        try:
            return (yield self)
        finally:
            ACTIVE_TASKS.remove(self)
def to_coro(func):
    """Wrap a plain function as a coroutine function with the same result."""
    @wraps(func)
    async def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def run_bg(func):
    """Decorator: launch the decorated coroutine function in the background,
    returning its Future instead of awaiting it."""
    @wraps(func)
    def launcher(*args, **kwargs):
        return ensure_future(func(*args, **kwargs))
    return launcher
class gather(Future):
    """
    Manages a collection of asynchronous tasks.
    The callbacks are executed when all of the subtasks have completed.
    """
    def __init__(self, *tasks):
        """Create from a list of `Future`-s."""
        tasks = list(tasks)
        self._done = False
        self._results = {}          # subtask index -> subtask result
        self._num_tasks = len(tasks)
        # No subtasks: complete immediately with an empty result list, but
        # asynchronously so observers can attach callbacks first.
        if not tasks:
            run_soon(self.set_result, [])
        for idx, task in enumerate(tasks):
            task = ensure_future(task)
            task.callbacks.append(partial(self._subtask_result, idx))
            task.errbacks.append(partial(self._subtask_error, idx))
    def _subtask_result(self, idx, value):
        """Receive a result from a single subtask."""
        self._results[idx] = value
        # Once the last subtask reports, deliver results in subtask order.
        if len(self._results) == self._num_tasks:
            self.set_result([
                self._results[i]
                for i in range(self._num_tasks)
            ])
    def _subtask_error(self, idx, error):
        """Receive an error from a single subtask."""
        self.set_exception(error)
        # Clear errbacks so errors from the remaining subtasks do not
        # invoke them a second time.
        self.errbacks.clear()
def call_func(fn, *args):
    """Invoke ``fn(*args)`` and discard its result.

    Used by run_soon with GLib.idle_add: an implicit None return ensures
    GLib does not reschedule the callback.
    """
    # NOTE: Apparently, idle_add does not re-execute its argument if an
    # exception is raised. So it's okay to let exceptions propagate.
    fn(*args)
def run_soon(fn, *args):
    """Schedule ``fn(*args)`` for a single execution on the GLib main loop."""
    GLib.idle_add(call_func, fn, *args)
def sleep(seconds):
    """Return a Future that completes with True after ``seconds`` seconds."""
    timer = Future()
    millis = int(seconds * 1000)
    GLib.timeout_add(millis, timer.set_result, True)
    return timer
def ensure_future(awaitable):
    """Coerce an awaitable to a Future; Futures pass through unchanged."""
    if not isinstance(awaitable, Future):
        awaitable = Task(iter(awaitable.__await__()))
    return awaitable
class Task(Future):
    """Turns a generator into a Future."""
    def __init__(self, generator):
        """Create and start a ``Task`` from the specified generator."""
        self._generator = generator
        # Kick off the generator on the next main-loop iteration.
        run_soon(self._resume, next, self._generator)
    def _resume(self, func, *args):
        """Resume the coroutine by throwing a value or returning a value from
        the ``await`` and handle further awaits."""
        try:
            value = func(*args)
        except StopIteration as e:
            # Generator finished normally; its return value is the result.
            self._generator.close()
            self.set_result(e.value)
        except Exception as e:
            self._generator.close()
            self.set_exception(e)
        else:
            # The coroutine yielded another Future: resume once it is done,
            # sending its result (or throwing its error) into the generator.
            assert isinstance(value, Future)
            value.callbacks.append(partial(self._resume, self._generator.send))
            value.errbacks.append(partial(self._resume, self._generator.throw))
def gio_callback(proxy, result, future):
    """Generic GIO async-ready callback: deliver the raw result to *future*."""
    future.set_result(result)
async def exec_subprocess(argv, capture=True):
    """
    A Future task that represents a subprocess. If successful, the task's
    result is set to the collected STDOUT of the subprocess.

    :param argv: command line as a list of arguments
    :param capture: when True, pipe the child's STDOUT and return it as bytes
    :raises subprocess.CalledProcessError: if the subprocess returns a non-zero
        exit code
    :raises RuntimeError: if the subprocess did not exit normally
    """
    future = Future()
    flags = ((Gio.SubprocessFlags.STDOUT_PIPE if capture else
              Gio.SubprocessFlags.NONE) |
             Gio.SubprocessFlags.STDIN_INHERIT)
    process = Gio.Subprocess.new(argv, flags)
    stdin_buf = None
    cancellable = None
    # gio_callback resolves `future` with the GAsyncResult when done.
    process.communicate_async(
        stdin_buf, cancellable, gio_callback, future)
    result = await future
    success, stdout, stderr = process.communicate_finish(result)
    stdout = stdout.get_data() if capture else None # GLib.Bytes -> bytes
    if not success:
        raise RuntimeError("Subprocess did not exit normally!")
    exit_code = process.get_exit_status()
    if exit_code != 0:
        # NOTE(review): CalledProcessError's signature is
        # (returncode, cmd, output); the message string here lands in the
        # returncode slot -- confirm this is intended.
        raise CalledProcessError(
            "Subprocess returned a non-zero exit-status!",
            exit_code,
            stdout)
    return stdout
|
{
"content_hash": "dd71178bfd788a253bcbb27b6c303e21",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 29.00854700854701,
"alnum_prop": 0.6306717737183265,
"repo_name": "coldfix/udiskie",
"id": "9005f9fc97b84c23ed8a2630ae05647c9540764a",
"size": "6788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udiskie/async_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1132"
},
{
"name": "Python",
"bytes": "174183"
},
{
"name": "Shell",
"bytes": "9956"
}
],
"symlink_target": ""
}
|
import os
import unittest
import six
from conans.paths import BUILD_INFO, CONANFILE
from conans.test.utils.tools import TestClient
from conans.util.files import load, mkdir
class SourceTest(unittest.TestCase):
    def test_conanfile_removed(self):
        """``conan source`` on an scm-enabled recipe must not delete the
        conanfile (regression test)."""
        # https://github.com/conan-io/conan/issues/4013
        conanfile = """from conans import ConanFile
class ScmtestConan(ConanFile):
    scm = {
        "type": "git",
        "url": "auto",
        "revision": "auto"
    }
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.runner("git init .", cwd=client.current_folder)
        client.run("source .")
        # Only the recipe and the git metadata should remain afterwards.
        self.assertEqual(sorted(["conanfile.py", '.git']),
                         sorted(os.listdir(client.current_folder)))
    def local_flow_patch_test(self):
        """Local ``conan source`` must expose exports, exports_sources and
        generated files inside the source folder, both in-place and with
        an explicit --source-folder."""
        # https://github.com/conan-io/conan/issues/2327
        conanfile = """from conans import ConanFile, tools
from conans.tools import save
import os
class TestexportConan(ConanFile):
    exports = "mypython.py"
    exports_sources = "patch.patch"
    def source(self):
        save("hello/hello.h", "my hello header!")
        patch = os.path.join(self.source_folder, "patch.patch")
        self.output.info("PATCH: %s" % tools.load(patch))
        header = os.path.join(self.source_folder, "hello/hello.h")
        self.output.info("HEADER: %s" % tools.load(header))
        python = os.path.join(self.source_folder, "mypython.py")
        self.output.info("PYTHON: %s" % tools.load(python))
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "patch.patch": "mypatch",
                     "mypython.py": "mypython"})
        # In-place source(): all three files must be readable.
        client.run("source .")
        self.assertIn("conanfile.py: PATCH: mypatch", client.out)
        self.assertIn("conanfile.py: HEADER: my hello header!", client.out)
        self.assertIn("conanfile.py: PYTHON: mypython", client.out)
        # Same with a dedicated source folder: exports are copied there.
        client.run("source . -sf=mysrc")
        self.assertIn("conanfile.py: Executing exports to", client.out)
        self.assertIn("conanfile.py: PATCH: mypatch", client.out)
        self.assertIn("conanfile.py: HEADER: my hello header!", client.out)
        self.assertIn("conanfile.py: PYTHON: mypython", client.out)
        self.assertTrue(os.path.exists(os.path.join(client.current_folder,
                                                    "mysrc", "patch.patch")))
        self.assertTrue(os.path.exists(os.path.join(client.current_folder,
                                                    "mysrc", "mypython.py")))
        self.assertTrue(os.path.exists(os.path.join(client.current_folder,
                                                    "mysrc", "hello/hello.h")))
    def apply_patch_test(self):
        """A patch file must be loadable from source() in both the local
        flow and ``conan create``."""
        # https://github.com/conan-io/conan/issues/2327
        # Test if a patch can be applied in source() both in create
        # and local flow
        client = TestClient()
        conanfile = """from conans import ConanFile
from conans.tools import load
import os
class Pkg(ConanFile):
    exports_sources = "*"
    def source(self):
        if self.develop:
            patch = os.path.join(self.source_folder, "mypatch")
            self.output.info("PATCH: %s" % load(patch))
"""
        client.save({"conanfile.py": conanfile,
                     "mypatch": "this is my patch"})
        client.run("source .")
        self.assertIn("PATCH: this is my patch", client.out)
        client.run("source . -sf=mysrc")
        self.assertIn("PATCH: this is my patch", client.out)
        client.run("create . Pkg/0.1@user/testing")
        self.assertIn("PATCH: this is my patch", client.out)
    def source_warning_os_build_test(self):
        """A plain recipe must not trigger the os/os_build settings warning
        during ``conan source``."""
        # https://github.com/conan-io/conan/issues/2368
        conanfile = '''from conans import ConanFile
class ConanLib(ConanFile):
    pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("source .")
        self.assertNotIn("This package defines both 'os' and 'os_build'", client.out)
    def source_reference_test(self):
        """``conan source <reference>`` is no longer supported and must fail
        with an explanatory message."""
        client = TestClient()
        client.run("source lib/1.0@conan/stable", assert_error=True)
        self.assertIn("'conan source' doesn't accept a reference anymore", client.out)
    def source_with_path_errors_test(self):
        """Bad conanfile paths must produce clear error messages."""
        client = TestClient()
        client.save({"conanfile.txt": "contents"}, clean_first=True)
        # Path with conanfile.txt
        client.run("source conanfile.txt --install-folder subdir", assert_error=True)
        self.assertIn(
            "A conanfile.py is needed, %s is not acceptable"
            % os.path.join(client.current_folder, "conanfile.txt"),
            client.out)
        # Path with wrong conanfile path
        # NOTE(review): this invokes 'conan package', not 'conan source' --
        # confirm whether that is intentional in this source-command test.
        client.run("package not_real_dir/conanfile.py --build-folder build2 --install-folder build",
                   assert_error=True)
        self.assertIn("Conanfile not found at %s"
                      % os.path.join(client.current_folder, "not_real_dir", "conanfile.py"),
                      client.out)
    def source_local_cwd_test(self):
        """With --source-folder, source() must execute with its cwd set to
        that folder."""
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
    name = "Hello"
    version = "0.1"
    def source(self):
        self.output.info("Running source!")
        self.output.info("cwd=>%s" % os.getcwd())
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        subdir = os.path.join(client.current_folder, "subdir")
        os.mkdir(subdir)
        client.run("install . --install-folder subdir")
        client.run("source . --install-folder subdir --source-folder subdir")
        self.assertIn("conanfile.py (Hello/0.1@None/None): Configuring sources", client.user_io.out)
        # The cwd reported from inside source() must be the requested subdir.
        self.assertIn("conanfile.py (Hello/0.1@None/None): cwd=>%s" % subdir, client.user_io.out)
    def local_source_src_not_exist_test(self):
        """A non-existing --source-folder must be created automatically."""
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
    name = "Hello"
    version = "0.1"
    def source(self):
        pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        # Automatically created
        client.run("source conanfile.py --source-folder=src")
        self.assertTrue(os.path.exists(os.path.join(client.current_folder, "src")))
    def build_folder_no_exists_crash_test(self):
        """Pointing --install-folder at a missing directory must produce a
        controlled error, not a crash."""
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
    name = "Hello"
    version = "0.1"
    def source(self):
        pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        # Automatically created
        client.run("source ./conanfile.py --install-folder=missing_folder", assert_error=True)
        self.assertIn("Specified info-folder doesn't exist", client.out)
    def build_folder_reading_infos_test(self):
        """source() can consume deps_cpp_info/deps_env_info/deps_user_info
        when an --install-folder with generated info files is supplied;
        without it, accessing those attributes must raise a clear error."""
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
    name = "Hello"
    version = "0.1"
    def package_info(self):
        self.cpp_info.cxxflags.append("FLAG")
        self.env_info.MYVAR = "foo"
        self.user_info.OTHERVAR = "bar"
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("export . conan/testing")
        # Consumer recipe that reads the dependency's info inside source().
        conanfile = '''
import os
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
    requires="Hello/0.1@conan/testing"
    def source(self):
        assert(os.getcwd() == self.source_folder)
        self.output.info("FLAG=%s" % self.deps_cpp_info["Hello"].cxxflags[0])
        self.output.info("MYVAR=%s" % self.deps_env_info["Hello"].MYVAR)
        self.output.info("OTHERVAR=%s" % self.deps_user_info["Hello"].OTHERVAR)
        self.output.info("CURDIR=%s" % os.getcwd())
'''
        # First, failing source()
        client.save({CONANFILE: conanfile}, clean_first=True)
        build_folder = os.path.join(client.current_folder, "build")
        src_folder = os.path.join(client.current_folder, "src")
        mkdir(build_folder)
        mkdir(src_folder)
        # No install has run yet, so the info files do not exist.
        client.run("source . --install-folder='%s' --source-folder='%s'"
                   % (build_folder, src_folder),
                   assert_error=True)
        self.assertIn("self.deps_cpp_info not defined.", client.out)
        # After installing (which generates the info files), it must work.
        client.run("install . --install-folder build --build ")
        client.run("source conanfile.py --install-folder='%s' --source-folder='%s'"
                   % (build_folder, src_folder))
        self.assertIn("FLAG=FLAG", client.out)
        self.assertIn("MYVAR=foo", client.out)
        self.assertIn("OTHERVAR=bar", client.out)
        self.assertIn("CURDIR=%s" % src_folder, client.out)
    def repeat_args_fails_test(self):
        """Repeating --source-folder or --install-folder on the command line
        must make the command fail."""
        conanfile = '''
from conans import ConanFile
class ConanLib(ConanFile):
    def source(self):
        pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("source ./conanfile.py --source-folder sf")
        with six.assertRaisesRegex(self, Exception, "Command failed"):
            client.run("source . --source-folder sf --source-folder sf")
        with six.assertRaisesRegex(self, Exception, "Command failed"):
            client.run("source conanfile.py --source-folder sf --install-folder if "
                       "--install-folder rr")
    def local_source_test(self):
        """A failing source() must report the offending recipe line; after
        fixing the recipe, "conan source" must succeed and produce the file
        written by source()."""
        conanfile = '''
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
    def source(self):
        self.output.info("Running source!")
        err
        save("file1.txt", "Hello World")
'''
        # First, failing source()
        client = TestClient()
        client.save({CONANFILE: conanfile,
                     BUILD_INFO: ""})
        client.run("source .", assert_error=True)
        self.assertIn("conanfile.py: Running source!", client.user_io.out)
        # "err" is an undefined name on line 9 of the recipe above.
        self.assertIn("ERROR: conanfile.py: Error in source() method, line 9", client.user_io.out)
        # Fix the error and repeat
        client.save({CONANFILE: conanfile.replace("err", "")})
        client.run("source .")
        self.assertIn("conanfile.py: Configuring sources in", client.user_io.out)
        self.assertIn("conanfile.py: Running source!", client.user_io.out)
        self.assertEqual("Hello World", load(os.path.join(client.current_folder, "file1.txt")))
|
{
"content_hash": "9a96770497169580bf5022656d61948e",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 100,
"avg_line_length": 36.92526690391459,
"alnum_prop": 0.611603700848111,
"repo_name": "memsharded/conan",
"id": "c29e768ea76f987cfa0973f77b2711e98925fe49",
"size": "10376",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/command/source_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
from twisted.internet.error import getConnectError
from twisted.internet import defer
from twisted.python.util import FancyEqMixin
from zope.interface import implementer
from twisted.internet.interfaces import IAddress, IStreamClientEndpoint
@implementer(IAddress)
class SerialAddress(FancyEqMixin, object):
    """
    An L{interfaces.IAddress} provider for serial port connections.

    Equality and comparison are driven by C{FancyEqMixin} over the single
    C{name} attribute.

    @ivar name: The device name associated with this port
    @type name: C{str}
    """
    # FancyEqMixin compares instances attribute-by-attribute over this tuple.
    compareAttributes = ('name',)

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'SerialAddress({0!r})'.format(self.name)

    def __hash__(self):
        # Hash must agree with FancyEqMixin equality, which only looks at name.
        return hash(self.name)
@implementer(IStreamClientEndpoint)
class SerialPortEndpoint(object):
    """
    A Serial Port endpoint.

    @type _serialport: L{serialport} module
    @ivar _serialport: A hook used for testing availability of serial port
        support.
    """
    # Resolved at class-definition time so tests can override it; None means
    # serial support (pyserial) is unavailable on this platform.
    try:
        from twisted.internet.serialport import SerialPort as _serialport
    except ImportError:
        _serialport = None

    def __init__(self, deviceNameOrPortNumber, reactor, *args, **kwargs):
        """
        @see: L{serialport.SerialPort}
        """
        self._deviceNameOrPortNumber = deviceNameOrPortNumber
        self._reactor = reactor
        self._args = args
        self._kwargs = kwargs

    def connect(self, serialFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect to serial ports

        @param serialFactory: The protocol factory which will build protocols
            for connections to this service.
        @type serialFactory: L{twisted.internet.interfaces.IProtocolFactory}

        @return: a L{Deferred} that fires with the connected protocol, or
            fails with a connection error (any exception raised here is
            translated via L{getConnectError}).
        """
        try:
            if self._serialport is None:
                # No serial support available; report it as a connect error.
                raise ImportError
            # noinspection PyArgumentList
            proto = serialFactory.buildProtocol(
                SerialAddress(self._deviceNameOrPortNumber))
            self._serialport(proto, self._deviceNameOrPortNumber,
                             self._reactor, *self._args, **self._kwargs)
            return proto and defer.succeed(proto)
        # Fixed: "except Exception, e" is Python-2-only syntax (a SyntaxError
        # on Python 3); "as" works on Python 2.6+ and 3.x.
        except Exception as e:
            return defer.fail(getConnectError(e))
|
{
"content_hash": "51d75b2972df67cd51d5bf3d8f9ef2c2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 31.23611111111111,
"alnum_prop": 0.6411738550466874,
"repo_name": "keturn/txOpenBCI",
"id": "c5508bc8b8fd5b465b621a4e3f54435097019cb4",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txopenbci/serial_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "967"
},
{
"name": "Python",
"bytes": "27817"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from olympia.constants.promoted import RECOMMENDED
import olympia.core.logger
from olympia import amo
from olympia.amo.utils import attach_trans_dict
from olympia.amo.celery import create_chunked_tasks_signatures
from olympia.amo.utils import to_language
from olympia.constants.search import SEARCH_LANGUAGE_TO_ANALYZER
from olympia.search.utils import create_index
from olympia.versions.compare import version_int
# Module-level logger for the Elasticsearch indexing code ('z.es' namespace).
log = olympia.core.logger.getLogger('z.es')
class AddonIndexer:
    """
    Base Indexer class for add-ons.

    Holds the Elasticsearch index settings and mapping for add-ons, plus the
    extraction code that turns an Addon instance into an indexable document.
    """
    @classmethod
    def attach_translation_mappings(cls, mapping, field_names):
        """
        For each field in field_names, attach a dict to the ES mapping
        properties making "<field_name>_translations" an object containing
        "string" and "lang" as non-indexed strings.
        Used to store non-indexed, non-analyzed translations in ES that will be
        sent back by the API for each item. It does not take care of the
        indexed content for search, it's there only to store and return
        raw translations.
        """
        for field_name in field_names:
            # _translations is the suffix in TranslationSerializer.
            mapping['properties'][
                '%s_translations' % field_name
            ] = cls.get_translations_definition()
    @classmethod
    def get_translations_definition(cls):
        """
        Return the mapping to use for raw translations (to be returned directly
        by the API, not used for analysis).
        See attach_translation_mappings() for more information.
        """
        return {
            'type': 'object',
            'properties': {
                'lang': {'type': 'text', 'index': False},
                'string': {'type': 'text', 'index': False},
            },
        }
    @classmethod
    def get_raw_field_definition(cls):
        """
        Return the mapping to use for the "raw" version of a field. Meant to be
        used as part of a 'fields': {'raw': ... } definition in the mapping of
        an existing field.
        Used for exact matches and sorting.
        """
        # It needs to be a keyword to turn off all analysis; that means we
        # don't get the lowercase filter applied by the standard &
        # language-specific analyzers, so we need to do that ourselves through
        # a custom normalizer for exact matches to work in a case-insensitive
        # way.
        return {
            'type': 'keyword',
            'normalizer': 'lowercase_keyword_normalizer',
        }
    @classmethod
    def attach_language_specific_analyzers(cls, mapping, field_names):
        """
        For each field in field_names, attach language-specific mappings that
        will use specific analyzers for these fields in every language that we
        support.
        These mappings are used by the search filtering code if they exist.
        """
        for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items():
            for field in field_names:
                property_name = '%s_l10n_%s' % (field, lang)
                mapping['properties'][property_name] = {
                    'type': 'text',
                    'analyzer': analyzer,
                }
    @classmethod
    def attach_language_specific_analyzers_with_raw_variant(cls, mapping, field_names):
        """
        Like attach_language_specific_analyzers() but with an extra field to
        store the "raw" variant of the value, for exact matches.
        """
        for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items():
            for field in field_names:
                property_name = '%s_l10n_%s' % (field, lang)
                mapping['properties'][property_name] = {
                    'type': 'text',
                    'analyzer': analyzer,
                    'fields': {
                        'raw': cls.get_raw_field_definition(),
                    },
                }
    @classmethod
    def extract_field_api_translations(cls, obj, field, db_field=None):
        """
        Returns a dict containing translations that we need to store for
        the API. Empty translations are skipped entirely.
        """
        if db_field is None:
            db_field = '%s_id' % field
        extend_with_me = {
            '%s_translations'
            % field: [
                {'lang': to_language(lang), 'string': str(string)}
                for lang, string in obj.translations[getattr(obj, db_field)]
                if string
            ]
        }
        return extend_with_me
    @classmethod
    def extract_field_search_translation(cls, obj, field, default_locale):
        """
        Returns the translation for this field in the object's default locale,
        in the form a dict with one entry (the field being the key and the
        translation being the value, or an empty string if none was found).
        That field will be analyzed and indexed by ES *without*
        language-specific analyzers.
        """
        translations = dict(obj.translations[getattr(obj, '%s_id' % field)])
        default_locale = default_locale.lower() if default_locale else None
        value = translations.get(default_locale, getattr(obj, field))
        return {field: str(value) if value else ''}
    @classmethod
    def extract_field_analyzed_translations(cls, obj, field, db_field=None):
        """
        Returns a dict containing translations for each language that we have
        an analyzer for, for the given field.
        When no translation exist for a given language+field combo, the value
        returned is an empty string, to avoid storing the word "None" as the
        field does not understand null values.
        """
        if db_field is None:
            db_field = '%s_id' % field
        translations = dict(obj.translations[getattr(obj, db_field)])
        return {
            '%s_l10n_%s' % (field, lang): translations.get(lang) or ''
            for lang in SEARCH_LANGUAGE_TO_ANALYZER
        }
    # Fields we don't need to expose in the results, only used for filtering
    # or sorting.
    hidden_fields = (
        '*.raw',
        'boost',
        'colors',
        'hotness',
        # Translated content that is used for filtering purposes is stored
        # under 3 different fields:
        # - One field with all translations (e.g., "name").
        # - One field for each language, using corresponding analyzer
        #   (e.g., "name_l10n_en-us", "name_l10n_fr", etc.)
        # - One field with all translations in separate objects for the API
        #   (e.g. "name_translations")
        # Only that last one with all translations needs to be returned.
        'name',
        'description',
        'name_l10n_*',
        'description_l10n_*',
        'summary',
        'summary_l10n_*',
    )
    # Custom ES analysis chain (analyzers/tokenizers/normalizers/filters)
    # attached to the index at creation time; see create_new_index().
    index_settings = {
        'analysis': {
            'analyzer': {
                'standard_with_word_split': {
                    # This analyzer tries to split the text into words by using
                    # various methods. It also lowercases them and make sure
                    # each token is only returned once.
                    # Only use for short things with extremely meaningful
                    # content like add-on name - it makes too many
                    # modifications to be useful for things like descriptions,
                    # for instance.
                    'tokenizer': 'standard',
                    'filter': [
                        'custom_word_delimiter',
                        'lowercase',
                        'stop',
                        'custom_dictionary_decompounder',
                        'unique',
                    ],
                },
                'trigram': {
                    # Analyzer that splits the text into trigrams.
                    'tokenizer': 'ngram_tokenizer',
                    'filter': [
                        'lowercase',
                    ],
                },
            },
            'tokenizer': {
                'ngram_tokenizer': {
                    'type': 'ngram',
                    'min_gram': 3,
                    'max_gram': 3,
                    'token_chars': ['letter', 'digit'],
                }
            },
            'normalizer': {
                'lowercase_keyword_normalizer': {
                    # By default keywords are indexed 'as-is', but for exact
                    # name matches we need to lowercase them before indexing,
                    # so this normalizer does that for us.
                    'type': 'custom',
                    'filter': ['lowercase'],
                },
            },
            'filter': {
                'custom_word_delimiter': {
                    # This filter is useful for add-on names that have multiple
                    # words sticked together in a way that is easy to
                    # recognize, like FooBar, which should be indexed as FooBar
                    # and Foo Bar. (preserve_original: True makes us index both
                    # the original and the split version.)
                    'type': 'word_delimiter',
                    'preserve_original': True,
                },
                'custom_dictionary_decompounder': {
                    # This filter is also useful for add-on names that have
                    # multiple words sticked together, but without a pattern
                    # that we can automatically recognize. To deal with those,
                    # we use a small dictionary of common words. It allows us
                    # to index 'awesometabpassword' as 'awesome tab password',
                    # helping users looking for 'tab password' find that addon.
                    'type': 'dictionary_decompounder',
                    'word_list': [
                        'all',
                        'auto',
                        'ball',
                        'bar',
                        'block',
                        'blog',
                        'bookmark',
                        'browser',
                        'bug',
                        'button',
                        'cat',
                        'chat',
                        'click',
                        'clip',
                        'close',
                        'color',
                        'context',
                        'cookie',
                        'cool',
                        'css',
                        'delete',
                        'dictionary',
                        'down',
                        'download',
                        'easy',
                        'edit',
                        'fill',
                        'fire',
                        'firefox',
                        'fix',
                        'flag',
                        'flash',
                        'fly',
                        'forecast',
                        'fox',
                        'foxy',
                        'google',
                        'grab',
                        'grease',
                        'html',
                        'http',
                        'image',
                        'input',
                        'inspect',
                        'inspector',
                        'iris',
                        'js',
                        'key',
                        'keys',
                        'lang',
                        'link',
                        'mail',
                        'manager',
                        'map',
                        'mega',
                        'menu',
                        'menus',
                        'monkey',
                        'name',
                        'net',
                        'new',
                        'open',
                        'password',
                        'persona',
                        'privacy',
                        'query',
                        'screen',
                        'scroll',
                        'search',
                        'secure',
                        'select',
                        'smart',
                        'spring',
                        'status',
                        'style',
                        'super',
                        'sync',
                        'tab',
                        'text',
                        'think',
                        'this',
                        'time',
                        'title',
                        'translate',
                        'tree',
                        'undo',
                        'upload',
                        'url',
                        'user',
                        'video',
                        'window',
                        'with',
                        'word',
                        'zilla',
                    ],
                },
            },
        }
    }
    @classmethod
    def get_model(cls):
        """Return the model class this indexer indexes (Addon)."""
        # Local import — presumably avoids a circular import at module load;
        # verify before moving it to the top of the file.
        from olympia.addons.models import Addon
        return Addon
    @classmethod
    def get_index_alias(cls):
        """Return the index alias name."""
        return settings.ES_INDEXES.get('default')
    @classmethod
    def get_mapping(cls):
        """Build and return the full ES mapping definition for add-ons."""
        appver_mapping = {
            'properties': {
                'max': {'type': 'long'},
                'min': {'type': 'long'},
                'max_human': {'type': 'keyword', 'index': False},
                'min_human': {'type': 'keyword', 'index': False},
            }
        }
        version_mapping = {
            'type': 'object',
            'properties': {
                'compatible_apps': {
                    'properties': {app.id: appver_mapping for app in amo.APP_USAGE}
                },
                # Keep '<version>.id' indexed to be able to run exists queries
                # on it.
                'id': {'type': 'long'},
                'reviewed': {'type': 'date', 'index': False},
                'files': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long', 'index': False},
                        'created': {'type': 'date', 'index': False},
                        'hash': {'type': 'keyword', 'index': False},
                        'filename': {'type': 'keyword', 'index': False},
                        'is_mozilla_signed_extension': {'type': 'boolean'},
                        'size': {'type': 'long', 'index': False},
                        'strict_compatibility': {'type': 'boolean', 'index': False},
                        'status': {'type': 'byte'},
                        'permissions': {'type': 'keyword', 'index': False},
                        'optional_permissions': {'type': 'keyword', 'index': False},
                    },
                },
                'license': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long', 'index': False},
                        'builtin': {'type': 'short', 'index': False},
                        'name_translations': cls.get_translations_definition(),
                        'url': {'type': 'text', 'index': False},
                    },
                },
                'release_notes_translations': cls.get_translations_definition(),
                'version': {'type': 'keyword', 'index': False},
            },
        }
        mapping = {
            'properties': {
                'id': {'type': 'long'},
                'app': {'type': 'byte'},
                'average_daily_users': {'type': 'long'},
                'bayesian_rating': {'type': 'double'},
                'boost': {'type': 'float', 'null_value': 1.0},
                'category': {'type': 'integer'},
                'colors': {
                    'type': 'nested',
                    'properties': {
                        'h': {'type': 'integer'},
                        's': {'type': 'integer'},
                        'l': {'type': 'integer'},
                        'ratio': {'type': 'double'},
                    },
                },
                'contributions': {'type': 'text'},
                'created': {'type': 'date'},
                'current_version': version_mapping,
                'default_locale': {'type': 'keyword', 'index': False},
                'description': {'type': 'text', 'analyzer': 'snowball'},
                'guid': {'type': 'keyword'},
                'has_eula': {'type': 'boolean', 'index': False},
                'has_privacy_policy': {'type': 'boolean', 'index': False},
                'hotness': {'type': 'double'},
                'icon_hash': {'type': 'keyword', 'index': False},
                'icon_type': {'type': 'keyword', 'index': False},
                'is_disabled': {'type': 'boolean'},
                'is_experimental': {'type': 'boolean'},
                'is_recommended': {'type': 'boolean'},
                'last_updated': {'type': 'date'},
                'listed_authors': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long'},
                        'name': {'type': 'text'},
                        'username': {'type': 'keyword'},
                        'is_public': {'type': 'boolean', 'index': False},
                    },
                },
                'modified': {'type': 'date', 'index': False},
                'name': {
                    'type': 'text',
                    # Adding word-delimiter to split on camelcase, known
                    # words like 'tab', and punctuation, and eliminate
                    # duplicates.
                    'analyzer': 'standard_with_word_split',
                    'fields': {
                        # Raw field for exact matches and sorting.
                        'raw': cls.get_raw_field_definition(),
                        # Trigrams for partial matches.
                        'trigrams': {
                            'type': 'text',
                            'analyzer': 'trigram',
                        },
                    },
                },
                'previews': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long', 'index': False},
                        'caption_translations': cls.get_translations_definition(),
                        'modified': {'type': 'date', 'index': False},
                        'position': {'type': 'long', 'index': False},
                        'sizes': {
                            'type': 'object',
                            'properties': {
                                'thumbnail': {'type': 'short', 'index': False},
                                'image': {'type': 'short', 'index': False},
                            },
                        },
                    },
                },
                'promoted': {
                    'type': 'object',
                    'properties': {
                        'group_id': {'type': 'byte'},
                        'approved_for_apps': {'type': 'byte'},
                    },
                },
                'ratings': {
                    'type': 'object',
                    'properties': {
                        'count': {'type': 'short', 'index': False},
                        'average': {'type': 'float'},
                    },
                },
                'slug': {'type': 'keyword'},
                'requires_payment': {'type': 'boolean', 'index': False},
                'status': {'type': 'byte'},
                'summary': {'type': 'text', 'analyzer': 'snowball'},
                'tags': {'type': 'keyword'},
                'type': {'type': 'byte'},
                'weekly_downloads': {'type': 'long'},
            },
        }
        # Add fields that we expect to return all translations without being
        # analyzed/indexed.
        cls.attach_translation_mappings(
            mapping,
            (
                'description',
                'developer_comments',
                'homepage',
                'name',
                'summary',
                'support_email',
                'support_url',
            ),
        )
        # Add language-specific analyzers for localized fields that are
        # analyzed/indexed.
        cls.attach_language_specific_analyzers(mapping, ('description', 'summary'))
        cls.attach_language_specific_analyzers_with_raw_variant(mapping, ('name',))
        return mapping
    @classmethod
    def extract_version(cls, obj, version_obj):
        """Return the indexable dict for version_obj (or None if there is no
        version), including file info, license and release-notes
        translations."""
        from olympia.versions.models import License, Version
        data = (
            {
                'id': version_obj.pk,
                'compatible_apps': cls.extract_compatibility_info(obj, version_obj),
                'files': [
                    {
                        'id': version_obj.file.id,
                        'created': version_obj.file.created,
                        'filename': version_obj.file.file.name,
                        'hash': version_obj.file.hash,
                        'is_mozilla_signed_extension': (
                            version_obj.file.is_mozilla_signed_extension
                        ),
                        'size': version_obj.file.size,
                        'status': version_obj.file.status,
                        'strict_compatibility': version_obj.file.strict_compatibility,
                        'permissions': version_obj.file.permissions,
                        'optional_permissions': version_obj.file.optional_permissions,
                    }
                ],
                'reviewed': version_obj.reviewed,
                'version': version_obj.version,
            }
            if version_obj
            else None
        )
        if data and version_obj:
            attach_trans_dict(Version, [version_obj])
            data.update(
                cls.extract_field_api_translations(
                    version_obj, 'release_notes', db_field='release_notes_id'
                )
            )
            if version_obj.license:
                data['license'] = {
                    'id': version_obj.license.id,
                    'builtin': version_obj.license.builtin,
                    'url': version_obj.license.url,
                }
                attach_trans_dict(License, [version_obj.license])
                data['license'].update(
                    cls.extract_field_api_translations(version_obj.license, 'name')
                )
        return data
    @classmethod
    def extract_compatibility_info(cls, obj, version_obj):
        """Return compatibility info for the specified version_obj, as will be
        indexed in ES."""
        compatible_apps = {}
        for app, appver in version_obj.compatible_apps.items():
            min_, max_ = appver.min.version_int, appver.max.version_int
            min_human, max_human = appver.min.version, appver.max.version
            if not version_obj.file.strict_compatibility:
                # The files attached to this version are not using strict
                # compatibility, so the max version essentially needs to be
                # ignored - let's fake a super high one. We leave max_human
                # alone to leave the API representation intact.
                max_ = version_int('*')
            compatible_apps[app.id] = {
                'min': min_,
                'min_human': min_human,
                'max': max_,
                'max_human': max_human,
            }
        return compatible_apps
    @classmethod
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview
        attrs = (
            'id',
            'average_daily_users',
            'bayesian_rating',
            'contributions',
            'created',
            'default_locale',
            'guid',
            'hotness',
            'icon_hash',
            'icon_type',
            'is_disabled',
            'is_experimental',
            'last_updated',
            'modified',
            'requires_payment',
            'slug',
            'status',
            'type',
            'weekly_downloads',
        )
        data = {attr: getattr(obj, attr) for attr in attrs}
        data['colors'] = None
        # Extract dominant colors from static themes.
        if obj.type == amo.ADDON_STATICTHEME:
            if obj.current_previews:
                data['colors'] = obj.current_previews[0].colors
        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a logarithmic scale.
        data['boost'] = float(data['average_daily_users'] ** 0.2)
        # Quadruple the boost if the add-on is public.
        if (
            obj.status == amo.STATUS_APPROVED
            and not obj.is_experimental
            and 'boost' in data
        ):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        data['current_version'] = cls.extract_version(obj, obj.current_version)
        data['listed_authors'] = [
            {
                'name': a.name,
                'id': a.id,
                'username': a.username,
                'is_public': a.is_public,
            }
            for a in obj.listed_authors
        ]
        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)
        data['is_recommended'] = bool(
            obj.promoted and obj.promoted.group == RECOMMENDED
        )
        data['previews'] = [
            {
                'id': preview.id,
                'modified': preview.modified,
                'sizes': preview.sizes,
                'position': preview.position,
            }
            for preview in obj.current_previews
        ]
        data['promoted'] = (
            {
                'group_id': obj.promoted.group_id,
                # store the app approvals because .approved_applications needs it.
                'approved_for_apps': [
                    app.id for app in obj.promoted.approved_applications
                ],
            }
            if obj.promoted
            else None
        )
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])
        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_api_translations(obj, field))
            data.update(
                cls.extract_field_search_translation(obj, field, obj.default_locale)
            )
            data.update(cls.extract_field_analyzed_translations(obj, field))
        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('developer_comments', 'homepage', 'support_email', 'support_url'):
            data.update(cls.extract_field_api_translations(obj, field))
        if obj.type != amo.ADDON_STATICTHEME:
            # Also do that for preview captions, which are set on each preview
            # object.
            attach_trans_dict(Preview, obj.current_previews)
            for i, preview in enumerate(obj.current_previews):
                data['previews'][i].update(
                    cls.extract_field_api_translations(preview, 'caption')
                )
        return data
    @classmethod
    def create_new_index(cls, index_name):
        """
        Create a new index for addons in ES.
        Intended to be used by reindexation (and tests), generally a bad idea
        to call manually.
        """
        create_index(
            index=index_name,
            mappings=cls.get_mapping(),
            index_settings={
                # create_index() will add its own index settings like number of
                # shards and replicas.
                'index': cls.index_settings
            },
        )
    @classmethod
    def reindex_tasks_group(cls, index_name):
        """
        Return the group of tasks to execute for a full reindex of addons on
        the index called `index_name` (which is not an alias but the real
        index name).
        """
        from olympia.addons.tasks import index_addons
        ids = cls.get_model().unfiltered.values_list('id', flat=True).order_by('id')
        chunk_size = 150
        return create_chunked_tasks_signatures(
            index_addons, list(ids), chunk_size, task_kwargs={'index': index_name}
        )
|
{
"content_hash": "fd568455ab8ce295bcda7e1ea5ca89b7",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 88,
"avg_line_length": 38.817819148936174,
"alnum_prop": 0.4501044842588469,
"repo_name": "wagnerand/addons-server",
"id": "20e305e440ca905d74eec8f060d7ae78963d77ef",
"size": "29191",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/addons/indexers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245987"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290334"
},
{
"name": "JavaScript",
"bytes": "749163"
},
{
"name": "Less",
"bytes": "211386"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6780019"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetQueueAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_IPv4
def delete_flows(ofswitch, table_id, flow_ids):
    """Remove each flow in 'flow_ids' from table 'table_id' on the given
    OpenFlow switch, printing the outcome of every deletion."""
    for fid in flow_ids:
        status = ofswitch.delete_flow(table_id, fid).get_status()
        if status.eq(STATUS.OK):
            print ("<<< Flow with id of '%s' successfully removed "
                   "from the Controller" % fid)
        else:
            print ("!!!Flow '%s' removal error, reason: %s" %
                   (fid, status.brief()))
def of_demo_43():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 43 Start")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Set Queue example"
priority = 1000
cookie = 1400
match_in_port = 109
match_eth_type = ETH_TYPE_IPv4
match_ipv4_dst_addr = "10.0.0.0/8"
act_queue_id = 7
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IPv4 Destination Address (%s)" %
(match_in_port,
hex(match_eth_type),
match_ipv4_dst_addr))
print (" Actions: Set Queue (%s)\n"
" Output (%s)" %
(act_queue_id, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetQueueAction(action_order)
action.set_gueue_id(act_queue_id)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ipv4_dst(match_ipv4_dst_addr)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print "\n".strip()
print ("<<< Get Queues Statistics for Port '%s'") % act_out_port
time.sleep(rundelay)
queues = []
result = ofswitch.get_port_queues_stats(act_out_port, decode_obj=True)
status = result.get_status()
if(status.eq(STATUS.OK)):
queues = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
pass
else:
print ("\n")
print ("!!!Error, failed to get queue statistics for port '%s" %
act_out_port)
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print "\n".strip()
s = "Port '{} - Queues Statistics'".format(act_out_port)
print " %s\n" % s
if queues:
for queue in sorted(queues, key=lambda q: q.queue_id()):
print " Queue '{}'".format(queue.queue_id())
print "\n".strip()
print " Tx Packets : {}".format(queue.tx_pkts())
print " Tx Bytes : {}".format(queue.tx_bytes())
print " Tx Errors : {}".format(queue.tx_errs())
print " Duration : {}s".format(queue.time_alive())
print "\n".strip()
else:
print " None"
print "\n".strip()
print ("<<< Get Queue '%s' Statistics for Port '%s'" %
(act_queue_id, act_out_port))
time.sleep(rundelay)
queue = None
result = ofswitch.get_port_queue_stats(port_num=act_out_port,
queue_id=act_queue_id,
decode_obj=True)
status = result.get_status()
if(status.eq(STATUS.OK)):
queue = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
pass
else:
print ("\n")
print ("!!!Error, failed to get port '%s' queue '%s' statistics " %
act_out_port, act_queue_id)
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print "\n".strip()
s = "Port '{}' - Queue '{}' Statistics".format(act_out_port, act_queue_id)
print " %s\n" % s
if queue:
print " Tx Packets : {}".format(queue.tx_pkts())
print " Tx Bytes : {}".format(queue.tx_bytes())
print " Tx Errors : {}".format(queue.tx_errs())
print " Duration : {}s".format(queue.time_alive())
print "\n".strip()
else:
print " None"
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    of_demo_43()
|
{
"content_hash": "bfea6e5b9fa120a5b446b3a187d797c8",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 78,
"avg_line_length": 32.64159292035398,
"alnum_prop": 0.5259590619493019,
"repo_name": "jebpublic/pybvc",
"id": "6c8ae20369d1276bd0bfe0df3762ba9e002a9b96",
"size": "8951",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "samples/sampleopenflow/demos/demo43.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "436853"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.cl.forms import CLRegionSelect, CLRutField
class CLLocalFlavorTests(SimpleTestCase):
    """Tests for the Chilean (CL) localflavor form fields and widgets."""

    def test_CLRegionSelect(self):
        """The region select widget renders one option per Chilean region."""
        widget = CLRegionSelect()
        expected = '''<select name="foo">
<option value="RM">Regi\xf3n Metropolitana de Santiago</option>
<option value="I">Regi\xf3n de Tarapac\xe1</option>
<option value="II">Regi\xf3n de Antofagasta</option>
<option value="III">Regi\xf3n de Atacama</option>
<option value="IV">Regi\xf3n de Coquimbo</option>
<option value="V">Regi\xf3n de Valpara\xedso</option>
<option value="VI">Regi\xf3n del Libertador Bernardo O'Higgins</option>
<option value="VII">Regi\xf3n del Maule</option>
<option value="VIII">Regi\xf3n del B\xedo B\xedo</option>
<option value="IX">Regi\xf3n de la Araucan\xeda</option>
<option value="X">Regi\xf3n de los Lagos</option>
<option value="XI">Regi\xf3n de Ays\xe9n del General Carlos Ib\xe1\xf1ez del Campo</option>
<option value="XII">Regi\xf3n de Magallanes y la Ant\xe1rtica Chilena</option>
<option value="XIV">Regi\xf3n de Los R\xedos</option>
<option value="XV">Regi\xf3n de Arica-Parinacota</option>
</select>'''
        rendered = widget.render('foo', 'bar')
        self.assertHTMLEqual(rendered, expected)

    def test_CLRutField(self):
        """CLRutField validates Chilean RUTs and normalizes their format."""
        error_invalid = ['The Chilean RUT is not valid.']
        error_format = ['Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.']
        # Accepted inputs mapped to their normalized representation.
        valid_ruts = {
            '11-6': '11-6',
            '116': '11-6',
            '767484100': '76.748.410-0',
            '78.412.790-7': '78.412.790-7',
            '8.334.6043': '8.334.604-3',
            '76793310-K': '76.793.310-K',
            '76793310-k': '76.793.310-K',
        }
        # Inputs with a bad check digit or too short to be a RUT.
        invalid_ruts = {
            '11.111.111-0': error_invalid,
            '111': error_invalid,
        }
        self.assertFieldOutput(CLRutField, valid_ruts, invalid_ruts)
        # In strict mode only the dotted XX.XXX.XXX-X format is accepted.
        strict_invalid = {
            '11-6': error_format,
            '767484100': error_format,
            '8.334.6043': error_format,
            '76793310-K': error_format,
            '11.111.111-0': error_invalid
        }
        self.assertFieldOutput(
            CLRutField, {}, strict_invalid, field_kwargs={"strict": True})
|
{
"content_hash": "8064f11f9e65808d2f38f0f0dc821d55",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 39.706896551724135,
"alnum_prop": 0.6100738167607469,
"repo_name": "agustin380/django-localflavor",
"id": "2ad9909e2a019620c347c24a9a1a41e6432057c4",
"size": "2303",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/test_cl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "814929"
}
],
"symlink_target": ""
}
|
"""TensorFlow impl of Spatial Softmax layers. (spatial soft arg-max).
TODO(T2R_CONTRIBUTORS) - consider replacing with contrib version.
"""
import gin
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
@gin.configurable
def BuildSpatialSoftmax(features, spatial_gumbel_softmax=False):
  """Computes the spatial softmax of the input features.

  Args:
    features: A tensor of size [batch_size, num_rows, num_cols, num_features]
    spatial_gumbel_softmax: If set to True, samples locations stochastically
      rather than computing expected coordinates with respect to heatmap.

  Returns:
    A tuple of (expected_feature_points, softmax).
    expected_feature_points: A tensor of size
      [batch_size, num_features * 2]. These are the expected feature
      locations, i.e., the spatial softmax of feature_maps. The inner
      dimension is arranged as [x1, x2, x3 ... xN, y1, y2, y3, ... yN].
    softmax: A Tensor which is the softmax of the features.
      [batch_size, num_rows, num_cols, num_features].
  """
  _, num_rows, num_cols, num_features = features.get_shape().as_list()
  with tf.name_scope('SpatialSoftmax'):
    # Normalized pixel coordinates in [-1, 1]. Built with vectorized NumPy
    # instead of a per-pixel Python double loop; each value is
    # 2 * index / (dim - 1) - 1, identical to the loop formulation.
    # NOTE(review): like the original, this assumes num_rows > 1 and
    # num_cols > 1 (otherwise the denominator is zero).
    x_coords = (2.0 * np.arange(num_cols) / (num_cols - 1.0)
                - 1.0).astype(np.float32)
    y_coords = (2.0 * np.arange(num_rows) / (num_rows - 1.0)
                - 1.0).astype(np.float32)
    # Flattened row-major grids of shape [num_rows * num_cols]: x varies
    # fastest within a row; y is constant across a row.
    x_pos = tf.constant(np.tile(x_coords, num_rows))
    y_pos = tf.constant(np.repeat(y_coords, num_cols))
    # We reorder the features (norm3) into the following order:
    # [batch_size, NUM_FEATURES, num_rows, num_cols]
    # This lets us merge the batch_size and num_features dimensions, in order
    # to compute spatial softmax as a single batch operation.
    features = tf.reshape(
        tf.transpose(features, [0, 3, 1, 2]), [-1, num_rows * num_cols])
    if spatial_gumbel_softmax:
      # Temperature is hard-coded for now, make this more flexible if results
      # are promising.
      dist = tfp.distributions.RelaxedOneHotCategorical(
          temperature=1.0, logits=features)
      softmax = dist.sample()
    else:
      softmax = tf.nn.softmax(features)
    # Expected coordinates: coordinate grids weighted by the softmax heatmap,
    # summed over all pixel positions.
    x_output = tf.multiply(x_pos, softmax)
    y_output = tf.multiply(y_pos, softmax)
    # `keepdims` replaces the deprecated `keep_dims` argument.
    x_output = tf.reduce_sum(x_output, [1], keepdims=True)
    y_output = tf.reduce_sum(y_output, [1], keepdims=True)
    # Concatenate x and y, and reshape back to [batch_size, num_features * 2].
    expected_feature_points = tf.reshape(
        tf.concat([x_output, y_output], 1), [-1, num_features*2])
    softmax = tf.transpose(
        tf.reshape(softmax, [-1, num_features, num_rows,
                             num_cols]), [0, 2, 3, 1])
    return expected_feature_points, softmax
|
{
"content_hash": "f8a0fb13651750cbe6d30e283ee8d973",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 77,
"avg_line_length": 40.54794520547945,
"alnum_prop": 0.6608108108108108,
"repo_name": "google-research/tensor2robot",
"id": "c755858357b5468ff2f05bb0b145e20c92612ecf",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layers/spatial_softmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1124212"
}
],
"symlink_target": ""
}
|
"""Imports modules that should be scanned during API generation.
This file should eventually contain everything we need to scan looking for
tf_export decorators.
"""
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
# pylint: disable=unused-import,g-importing-member
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import config
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import * # pylint: disable=redefined-builtin
# Function
from tensorflow.core.function.trace_type import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
from tensorflow.python.util.tf_export import tf_export
# _internal APIs
from tensorflow.python.distribute.combinations import generate
from tensorflow.python.distribute.experimental.rpc.rpc_ops import *
from tensorflow.python.distribute.merge_call_interim import *
from tensorflow.python.distribute.multi_process_runner import *
from tensorflow.python.distribute.multi_worker_test_base import *
from tensorflow.python.distribute.sharded_variable import *
from tensorflow.python.distribute.strategy_combinations import *
from tensorflow.python.framework.combinations import *
from tensorflow.python.framework.composite_tensor import *
from tensorflow.python.framework.test_combinations import *
from tensorflow.python.util.tf_decorator import make_decorator
from tensorflow.python.util.tf_decorator import unwrap
from tensorflow.python.distribute.parameter_server_strategy_v2 import *
from tensorflow.python.distribute.coordinator.cluster_coordinator import *
tf_export('__internal__.decorator.make_decorator', v1=[])(make_decorator)
tf_export('__internal__.decorator.unwrap', v1=[])(unwrap)
# Export protos
# pylint: disable=undefined-variable
tf_export(v1=['AttrValue'])(AttrValue)
tf_export(v1=['ConfigProto'])(ConfigProto)
tf_export(v1=['Event', 'summary.Event'])(Event)
tf_export(v1=['GPUOptions'])(GPUOptions)
tf_export(v1=['GraphOptions'])(GraphOptions)
tf_export(v1=['HistogramProto'])(HistogramProto)
tf_export(v1=['LogMessage'])(LogMessage)
tf_export(v1=['MetaGraphDef'])(MetaGraphDef)
tf_export(v1=['NameAttrList'])(NameAttrList)
tf_export(v1=['NodeDef'])(NodeDef)
tf_export(v1=['OptimizerOptions'])(OptimizerOptions)
tf_export(v1=['RunMetadata'])(RunMetadata)
tf_export(v1=['RunOptions'])(RunOptions)
tf_export(v1=['SessionLog', 'summary.SessionLog'])(SessionLog)
tf_export(v1=['Summary', 'summary.Summary'])(Summary)
tf_export(v1=['summary.SummaryDescription'])(SummaryDescription)
tf_export(v1=['SummaryMetadata'])(SummaryMetadata)
tf_export(v1=['summary.TaggedRunMetadata'])(TaggedRunMetadata)
tf_export(v1=['TensorInfo'])(TensorInfo)
# pylint: enable=undefined-variable
|
{
"content_hash": "f1e439eceb012236046bd3cc9a493fe3",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 92,
"avg_line_length": 41.65060240963855,
"alnum_prop": 0.8009835117153601,
"repo_name": "gautam1858/tensorflow",
"id": "8681a2c6327a34c9230ae109ac680a45b290da28",
"size": "4146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/modules_with_exports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "47492"
},
{
"name": "C",
"bytes": "1129549"
},
{
"name": "C#",
"bytes": "13496"
},
{
"name": "C++",
"bytes": "116904214"
},
{
"name": "CMake",
"bytes": "165809"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "341994"
},
{
"name": "Go",
"bytes": "2052513"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1053827"
},
{
"name": "JavaScript",
"bytes": "5772"
},
{
"name": "Jupyter Notebook",
"bytes": "787371"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "9549263"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "180638"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Pawn",
"bytes": "5336"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "43775271"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "7854"
},
{
"name": "Shell",
"bytes": "566970"
},
{
"name": "Smarty",
"bytes": "89664"
},
{
"name": "SourcePawn",
"bytes": "8509"
},
{
"name": "Starlark",
"bytes": "6897556"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from enum import Enum
import onnx
from onnx import OperatorSetIdProto, TensorProto, helper
# Operator sets targeted by the generated model: the default ONNX domain
# pinned to opset 12, plus Microsoft's "com.microsoft" contrib domain at
# version 1.
opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
onnxdomain.domain = ""  # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)
msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"
opsets.append(msdomain)
# Keyword arguments forwarded to helper.make_model so the saved model
# declares the opset imports above.
kwargs = {}
kwargs["opset_imports"] = opsets
def GenerateModel(model_name):
    """Build a graph of Not->Where patterns and save it to *model_name*.

    The graph wires Not and Where nodes in several sharing configurations
    (a reused Not output, double negation, a Not that is also a graph
    output, and one Not feeding multiple Where nodes).
    """
    graph_nodes = [
        helper.make_node("Not", ["X"], ["not_X_1"], "not_1"),
        helper.make_node("Where", ["not_X_1", "v0", "v1"], ["Y1"], "where_1"),
        helper.make_node("Not", ["not_X_1"], ["x"], "not_2"),
        helper.make_node("Identity", ["v0"], ["v0_edge"], "identity_v0"),
        helper.make_node("Identity", ["v1"], ["v1_edge"], "identity_v1"),
        helper.make_node("Where", ["x", "v0_edge", "v1_edge"], ["Y2"], "where_2"),
        helper.make_node("Not", ["X"], ["not_X_2"], "not_3"),
        helper.make_node("Where", ["not_X_2", "v0", "v1"], ["Y3"], "where_3"),
        helper.make_node("Not", ["X"], ["not_X_3"], "not_4"),
        helper.make_node("Where", ["not_X_3", "v0", "v1"], ["Y4"], "where_4"),
        helper.make_node("Where", ["not_X_3", "v0", "v1"], ["Y5"], "where_5"),
    ]
    graph_inputs = [
        helper.make_tensor_value_info("X", TensorProto.BOOL, ["M", "K"]),
    ]
    # Constant branches selected by the Where nodes.
    graph_initializers = [
        helper.make_tensor("v0", TensorProto.FLOAT, [1], [1.0]),
        helper.make_tensor("v1", TensorProto.FLOAT, [1], [-1.0]),
    ]
    graph_outputs = [
        helper.make_tensor_value_info("not_X_2", TensorProto.BOOL, ["M", "K"]),
        helper.make_tensor_value_info("Y1", TensorProto.FLOAT, ["M", "K"]),
        helper.make_tensor_value_info("Y2", TensorProto.FLOAT, ["M", "K"]),
        helper.make_tensor_value_info("Y3", TensorProto.FLOAT, ["M", "K"]),
        helper.make_tensor_value_info("Y4", TensorProto.FLOAT, ["M", "K"]),
        helper.make_tensor_value_info("Y5", TensorProto.FLOAT, ["M", "K"]),
    ]
    graph = helper.make_graph(graph_nodes, "NotWhere", graph_inputs,
                              graph_outputs, graph_initializers)
    onnx.save(helper.make_model(graph, **kwargs), model_name)
if __name__ == "__main__":
GenerateModel("not_where.onnx")
|
{
"content_hash": "59cb5d6f839fa9f4b95ff0ae6690a1bf",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 148,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.5624483043837882,
"repo_name": "microsoft/onnxruntime",
"id": "7e48164d5161a882e733fdec88b751510243f4d4",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnxruntime/test/testdata/transform/fusion/not_where.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
import pytest
import main
# Note: samples that do end-user auth are difficult to test in an automated
# way. These tests are basic confidence checks.
@pytest.fixture
def client():
    """Flask test client for the sample app, with testing mode enabled."""
    app = main.app
    app.testing = True
    return app.test_client()
def test_index_wo_credentials(client):
    """Without stored credentials, the index redirects to the auth flow."""
    resp = client.get('/')
    assert resp.status_code == 302
    assert resp.headers['location'].endswith('authorize')
def test_authorize(client):
    """The authorize endpoint redirects to Google's OAuth consent page."""
    resp = client.get('/authorize')
    assert resp.status_code == 302
    assert resp.headers['location'].startswith('https://accounts.google.com')
|
{
"content_hash": "9ab029e780e91ca91733198a90f54e91",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6971830985915493,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "5e8e07296249b3f829f519e8f782574dfe5b9f99",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "auth/end-user/web/main_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
INSTANCE_TYPES = {
"a1.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.2xlarge",
"instancesku": "D5XG23APR65M5SYE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "a1.2xlarge",
"name": "a1.2xlarge",
"ram": 16384,
},
"a1.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "a1.4xlarge",
"name": "a1.4xlarge",
"ram": 32768,
},
"a1.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.large",
"instancesku": "GC2JN6QM5JNRYDST",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "a1.large",
"name": "a1.large",
"ram": 4096,
},
"a1.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.medium",
"instancesku": "899RCWM3249A3E8S",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "a1.medium",
"name": "a1.medium",
"ram": 2048,
},
"a1.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.xlarge",
"instancesku": "44BG498Y4VQ5GM28",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "a1.xlarge",
"name": "a1.xlarge",
"ram": 8192,
},
"c1.medium": {
"bandwidth": None,
"disk": 350,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Compute optimized",
"instanceType": "c1.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1.7 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 350 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c1.medium",
"name": "c1.medium",
"ram": 1740,
},
"c1.xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "20",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Compute optimized",
"instanceType": "c1.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "7 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 420 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c1.xlarge",
"name": "c1.xlarge",
"ram": 7168,
},
"c3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "28",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.2xlarge",
"instancesku": "YCNS7SECEMFTZFYH",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 80 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c3.2xlarge",
"name": "c3.2xlarge",
"ram": 15360,
},
"c3.4xlarge": {
"bandwidth": None,
"disk": 320,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "55",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 160 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c3.4xlarge",
"name": "c3.4xlarge",
"ram": 30720,
},
"c3.8xlarge": {
"bandwidth": 10,
"disk": 640,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "108",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.8xlarge",
"instancesku": "FKFNCVGF8F4VBXQ4",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "60 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 320 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c3.8xlarge",
"name": "c3.8xlarge",
"ram": 61440,
},
"c3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "7",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 16 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c3.large",
"name": "c3.large",
"ram": 3840,
},
"c3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.xlarge",
"instancesku": "X2KK4E7Y22HQUHTD",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 40 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c3.xlarge",
"name": "c3.xlarge",
"ram": 7680,
},
"c4.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c4.2xlarge",
"name": "c4.2xlarge",
"ram": 15360,
},
"c4.4xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "62",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.4xlarge",
"instancesku": "PYDH5P62F8KBF85B",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c4.4xlarge",
"name": "c4.4xlarge",
"ram": 30720,
},
"c4.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "132",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.8xlarge",
"instancesku": "4VRUFQGZS9QFSB7E",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "60 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36",
"vpcnetworkingsupport": "true",
},
"id": "c4.8xlarge",
"name": "c4.8xlarge",
"ram": 61440,
},
"c4.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.large",
"instancesku": "3N2AT4Z2NBKAT2N4",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c4.large",
"name": "c4.large",
"ram": 3840,
},
"c4.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "7.5 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c4.xlarge",
"name": "c4.xlarge",
"ram": 7680,
},
"c5.12xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.12xlarge",
"instancesku": "B2D6JZJWM7HPMFFP",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c5.12xlarge",
"name": "c5.12xlarge",
"ram": 98304,
},
"c5.18xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.18xlarge",
"instancesku": "H2XX5A4Y823VVMXZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "144 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "72",
"vpcnetworkingsupport": "true",
},
"id": "c5.18xlarge",
"name": "c5.18xlarge",
"ram": 147456,
},
"c5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "375",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.24xlarge",
"instancesku": "GB2CVQJXDU9FR8GT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c5.24xlarge",
"name": "c5.24xlarge",
"ram": 196608,
},
"c5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c5.2xlarge",
"name": "c5.2xlarge",
"ram": 16384,
},
"c5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c5.4xlarge",
"name": "c5.4xlarge",
"ram": 32768,
},
"c5.9xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.9xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "72 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36",
"vpcnetworkingsupport": "true",
},
"id": "c5.9xlarge",
"name": "c5.9xlarge",
"ram": 73728,
},
"c5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c5.large",
"name": "c5.large",
"ram": 4096,
},
"c5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.xlarge",
"instancesku": "K6WNSK6Q2CWEYGHX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c5.xlarge",
"name": "c5.xlarge",
"ram": 8192,
},
"c5a.12xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c5a.12xlarge",
"name": "c5a.12xlarge",
"ram": 98304,
},
"c5a.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6300 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.16xlarge",
"instancesku": "U4MEJNF3728KAVBF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c5a.16xlarge",
"name": "c5a.16xlarge",
"ram": 131072,
},
"c5a.24xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.24xlarge",
"instancesku": "DPHJ9PS8GF7ZRFEF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c5a.24xlarge",
"name": "c5a.24xlarge",
"ram": 196608,
},
"c5a.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.2xlarge",
"instancesku": "78USCN84VB25CP2Y",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c5a.2xlarge",
"name": "c5a.2xlarge",
"ram": 16384,
},
"c5a.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.4xlarge",
"instancesku": "FXZCY68C879B2BXD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c5a.4xlarge",
"name": "c5a.4xlarge",
"ram": 32768,
},
"c5a.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.8xlarge",
"instancesku": "5PMN73A4GCYVKWT5",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c5a.8xlarge",
"name": "c5a.8xlarge",
"ram": 65536,
},
"c5a.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c5a.large",
"name": "c5a.large",
"ram": 4096,
},
"c5a.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5a.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c5a.xlarge",
"name": "c5a.xlarge",
"ram": 8192,
},
"c5ad.12xlarge": {
"bandwidth": 12,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.12xlarge",
"instancesku": "AMC8CQE4QGF9WSKJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.12xlarge",
"name": "c5ad.12xlarge",
"ram": 98304,
},
"c5ad.16xlarge": {
"bandwidth": 20,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6300 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1200 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.16xlarge",
"name": "c5ad.16xlarge",
"ram": 131072,
},
"c5ad.24xlarge": {
"bandwidth": 20,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.24xlarge",
"instancesku": "W45YTYFWCBGWJGSQ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.24xlarge",
"name": "c5ad.24xlarge",
"ram": 196608,
},
"c5ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.2xlarge",
"instancesku": "D8MQ8PRJMKXJHGCR",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.2xlarge",
"name": "c5ad.2xlarge",
"ram": 16384,
},
"c5ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.4xlarge",
"instancesku": "TQMPFTV5HMXTXSZJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.4xlarge",
"name": "c5ad.4xlarge",
"ram": 32768,
},
"c5ad.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.8xlarge",
"instancesku": "X2DT43NU2ZYNA6R3",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.8xlarge",
"name": "c5ad.8xlarge",
"ram": 65536,
},
"c5ad.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.large",
"instancesku": "HT8MSHND4S9PP8YT",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.large",
"name": "c5ad.large",
"ram": 4096,
},
"c5ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5ad.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c5ad.xlarge",
"name": "c5ad.xlarge",
"ram": 8192,
},
"c5d.12xlarge": {
"bandwidth": 12,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c5d.12xlarge",
"name": "c5d.12xlarge",
"ram": 98304,
},
"c5d.18xlarge": {
"bandwidth": 25,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.18xlarge",
"instancesku": "RDHAU3QJUFDHWGAF",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "144 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "72",
"vpcnetworkingsupport": "true",
},
"id": "c5d.18xlarge",
"name": "c5d.18xlarge",
"ram": 147456,
},
"c5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "375",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.24xlarge",
"instancesku": "CBCNASX9DSMBPMQW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c5d.24xlarge",
"name": "c5d.24xlarge",
"ram": 196608,
},
"c5d.2xlarge": {
"bandwidth": 10,
"disk": 200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 200 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c5d.2xlarge",
"name": "c5d.2xlarge",
"ram": 16384,
},
"c5d.4xlarge": {
"bandwidth": 10,
"disk": 400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.4xlarge",
"instancesku": "FUGNF8J88PBAQDYT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 400 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c5d.4xlarge",
"name": "c5d.4xlarge",
"ram": 32768,
},
"c5d.9xlarge": {
"bandwidth": 10,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.9xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "72 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 900 NVMe SSD",
"vcpu": "36",
"vpcnetworkingsupport": "true",
},
"id": "c5d.9xlarge",
"name": "c5d.9xlarge",
"ram": 73728,
},
"c5d.large": {
"bandwidth": 10,
"disk": 50,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 50 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c5d.large",
"name": "c5d.large",
"ram": 4096,
},
"c5d.xlarge": {
"bandwidth": 10,
"disk": 100,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.xlarge",
"instancesku": "BEKRK57J4EHTR2VJ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 100 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c5d.xlarge",
"name": "c5d.xlarge",
"ram": 8192,
},
"c5n.18xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.18xlarge",
"instancesku": "4VUXUHRQ7ZTHKVDG",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "72",
"vpcnetworkingsupport": "true",
},
"id": "c5n.18xlarge",
"name": "c5n.18xlarge",
"ram": 196608,
},
"c5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "21 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c5n.2xlarge",
"name": "c5n.2xlarge",
"ram": 21504,
},
"c5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "42 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c5n.4xlarge",
"name": "c5n.4xlarge",
"ram": 43008,
},
"c5n.9xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.9xlarge",
"instancesku": "ZCU5ZN54RFFWS4ZW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36",
"vpcnetworkingsupport": "true",
},
"id": "c5n.9xlarge",
"name": "c5n.9xlarge",
"ram": 98304,
},
"c5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "5.25 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c5n.large",
"name": "c5n.large",
"ram": 5376,
},
"c5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "10.5 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c5n.xlarge",
"name": "c5n.xlarge",
"ram": 10752,
},
"c6a.12xlarge": {
"bandwidth": 18750,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.12xlarge",
"instancesku": "4DS8T6KGKGZAMR4E",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "18750 Megabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c6a.12xlarge",
"name": "c6a.12xlarge",
"ram": 98304,
},
"c6a.16xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13333 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.16xlarge",
"instancesku": "ZYKSSAJFDSUDXYYM",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c6a.16xlarge",
"name": "c6a.16xlarge",
"ram": 131072,
},
"c6a.24xlarge": {
"bandwidth": 37500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.24xlarge",
"instancesku": "J8F6XP3AVRUVYBNW",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "37500 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c6a.24xlarge",
"name": "c6a.24xlarge",
"ram": 196608,
},
"c6a.2xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.2xlarge",
"instancesku": "3WJBAWQUBBE5F5YH",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c6a.2xlarge",
"name": "c6a.2xlarge",
"ram": 16384,
},
"c6a.32xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "26667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.32xlarge",
"instancesku": "HK95HRF8T46W5R3Y",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "c6a.32xlarge",
"name": "c6a.32xlarge",
"ram": 262144,
},
"c6a.48xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.48xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "384",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "192",
"vpcnetworkingsupport": "true",
},
"id": "c6a.48xlarge",
"name": "c6a.48xlarge",
"ram": 393216,
},
"c6a.4xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c6a.4xlarge",
"name": "c6a.4xlarge",
"ram": 32768,
},
"c6a.8xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.8xlarge",
"instancesku": "EEBQJJ2XFJ85RVAG",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "12500 Megabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c6a.8xlarge",
"name": "c6a.8xlarge",
"ram": 65536,
},
"c6a.large": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.large",
"instancesku": "APBN7UCQ2YC222ZE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c6a.large",
"name": "c6a.large",
"ram": 4096,
},
"c6a.xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6a.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c6a.xlarge",
"name": "c6a.xlarge",
"ram": 8192,
},
"c6g.12xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.12xlarge",
"instancesku": "5427FJVXMN85SC8G",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c6g.12xlarge",
"name": "c6g.12xlarge",
"ram": 98304,
},
"c6g.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.16xlarge",
"instancesku": "M8TJWAS9QQMZ7D8T",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c6g.16xlarge",
"name": "c6g.16xlarge",
"ram": 131072,
},
"c6g.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.2xlarge",
"instancesku": "64UAZ5JQ6EFAVPMB",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c6g.2xlarge",
"name": "c6g.2xlarge",
"ram": 16384,
},
"c6g.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c6g.4xlarge",
"name": "c6g.4xlarge",
"ram": 32768,
},
"c6g.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.8xlarge",
"instancesku": "PB5P6ZJYNWR2SRGE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c6g.8xlarge",
"name": "c6g.8xlarge",
"ram": 65536,
},
"c6g.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.large",
"instancesku": "6PQ4U29URPRQ2MYZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c6g.large",
"name": "c6g.large",
"ram": 4096,
},
"c6g.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "c6g.medium",
"name": "c6g.medium",
"ram": 2048,
},
"c6g.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c6g.xlarge",
"name": "c6g.xlarge",
"ram": 8192,
},
"c6gd.12xlarge": {
"bandwidth": 20,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.12xlarge",
"name": "c6gd.12xlarge",
"ram": 98304,
},
"c6gd.16xlarge": {
"bandwidth": 25,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.16xlarge",
"instancesku": "BN5N4P25EEGXQXAK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.16xlarge",
"name": "c6gd.16xlarge",
"ram": 131072,
},
"c6gd.2xlarge": {
"bandwidth": 10,
"disk": 475,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.2xlarge",
"instancesku": "VS68MNYP4R23S8R5",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.2xlarge",
"name": "c6gd.2xlarge",
"ram": 16384,
},
"c6gd.4xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.4xlarge",
"instancesku": "8DYFS6J38M6GB9JE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.4xlarge",
"name": "c6gd.4xlarge",
"ram": 32768,
},
"c6gd.8xlarge": {
"bandwidth": 10,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.8xlarge",
"name": "c6gd.8xlarge",
"ram": 65536,
},
"c6gd.large": {
"bandwidth": 10,
"disk": 118,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 118 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.large",
"name": "c6gd.large",
"ram": 4096,
},
"c6gd.medium": {
"bandwidth": 10,
"disk": 59,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.medium",
"instancesku": "7SM9BN5DJ2TG9E5R",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 59 NVMe SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.medium",
"name": "c6gd.medium",
"ram": 2048,
},
"c6gd.xlarge": {
"bandwidth": 10,
"disk": 237,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gd.xlarge",
"instancesku": "8JNY7VUMPB55FZXV",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 237 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c6gd.xlarge",
"name": "c6gd.xlarge",
"ram": 8192,
},
"c6gn.12xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "28500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.12xlarge",
"name": "c6gn.12xlarge",
"ram": 98304,
},
"c6gn.16xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.16xlarge",
"instancesku": "USCWQCCCVXKT957C",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.16xlarge",
"name": "c6gn.16xlarge",
"ram": 131072,
},
"c6gn.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.2xlarge",
"instancesku": "M6MVR7VW6XTWPHQ5",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.2xlarge",
"name": "c6gn.2xlarge",
"ram": 16384,
},
"c6gn.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.4xlarge",
"name": "c6gn.4xlarge",
"ram": 32768,
},
"c6gn.8xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.8xlarge",
"instancesku": "NX8CJEX2JY5HXHQR",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.8xlarge",
"name": "c6gn.8xlarge",
"ram": 65536,
},
"c6gn.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.large",
"name": "c6gn.large",
"ram": 4096,
},
"c6gn.medium": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.medium",
"name": "c6gn.medium",
"ram": 2048,
},
"c6gn.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6gn.xlarge",
"instancesku": "WB4FCM6TXPX93PGW",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c6gn.xlarge",
"name": "c6gn.xlarge",
"ram": 8192,
},
"c6i.12xlarge": {
"bandwidth": 18750,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "15000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.12xlarge",
"instancesku": "XBWGTH2ZPCNK49B9",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "18750 Megabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "c6i.12xlarge",
"name": "c6i.12xlarge",
"ram": 98304,
},
"c6i.16xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.16xlarge",
"instancesku": "U2QZS64THC8FN76P",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "c6i.16xlarge",
"name": "c6i.16xlarge",
"ram": 131072,
},
"c6i.24xlarge": {
"bandwidth": 37500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "30000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.24xlarge",
"instancesku": "EHJ3EEC56AV94428",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "37500 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "c6i.24xlarge",
"name": "c6i.24xlarge",
"ram": 196608,
},
"c6i.2xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.2xlarge",
"instancesku": "89MWKQGZ92TJDSQZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "c6i.2xlarge",
"name": "c6i.2xlarge",
"ram": 16384,
},
"c6i.32xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.32xlarge",
"instancesku": "QNUQSM3BUHRHUWTU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "c6i.32xlarge",
"name": "c6i.32xlarge",
"ram": 262144,
},
"c6i.4xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.4xlarge",
"instancesku": "4H74EXBA3BH2E77C",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "c6i.4xlarge",
"name": "c6i.4xlarge",
"ram": 32768,
},
"c6i.8xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "12500 Megabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "c6i.8xlarge",
"name": "c6i.8xlarge",
"ram": 65536,
},
"c6i.large": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "c6i.large",
"name": "c6i.large",
"ram": 4096,
},
"c6i.xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c6i.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "c6i.xlarge",
"name": "c6i.xlarge",
"ram": 8192,
},
"cc2.8xlarge": {
"bandwidth": 10,
"disk": 3360,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "88",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Compute optimized",
"instanceType": "cc2.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "60.5 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 840 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "cc2.8xlarge",
"name": "cc2.8xlarge",
"ram": 61952,
},
"cr1.8xlarge": {
"bandwidth": 10,
"disk": 240,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "88",
"instanceFamily": "Memory optimized",
"instanceType": "cr1.8xlarge",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 120 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "cr1.8xlarge",
"name": "cr1.8xlarge",
"ram": 249856,
},
"d2.2xlarge": {
"bandwidth": None,
"disk": 12000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "28",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.2xlarge",
"instancesku": "MMEKDXY58RD4JWKV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "6 x 2000 HDD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "d2.2xlarge",
"name": "d2.2xlarge",
"ram": 62464,
},
"d2.4xlarge": {
"bandwidth": None,
"disk": 24000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "56",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "12 x 2000 HDD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "d2.4xlarge",
"name": "d2.4xlarge",
"ram": 124928,
},
"d2.8xlarge": {
"bandwidth": 10,
"disk": 48000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "116",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 2000 HDD",
"vcpu": "36",
"vpcnetworkingsupport": "true",
},
"id": "d2.8xlarge",
"name": "d2.8xlarge",
"ram": 249856,
},
"d2.xlarge": {
"bandwidth": None,
"disk": 6000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "3 x 2000 HDD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "d2.xlarge",
"name": "d2.xlarge",
"ram": 31232,
},
"d3.2xlarge": {
"bandwidth": 15,
"disk": 12000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3.2xlarge",
"instancesku": "5TGGFNPG4M6GTUKU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 15 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "6 x 2000 HDD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "d3.2xlarge",
"name": "d3.2xlarge",
"ram": 65536,
},
"d3.4xlarge": {
"bandwidth": 15,
"disk": 24000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3.4xlarge",
"instancesku": "4DVAYKJPKD2C77C6",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 15 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "12 x 2000 HDD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "d3.4xlarge",
"name": "d3.4xlarge",
"ram": 131072,
},
"d3.8xlarge": {
"bandwidth": 25,
"disk": 48000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 2000 HDD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "d3.8xlarge",
"name": "d3.8xlarge",
"ram": 262144,
},
"d3.xlarge": {
"bandwidth": 15,
"disk": 6000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3.xlarge",
"instancesku": "E6KXGC3UNB34ACFA",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 15 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "3 x 2000 HDD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "d3.xlarge",
"name": "d3.xlarge",
"ram": 32768,
},
"d3en.12xlarge": {
"bandwidth": 75,
"disk": 336000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.12xlarge",
"instancesku": "2DB47P5DFHGJQV9E",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 14000 HDD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "d3en.12xlarge",
"name": "d3en.12xlarge",
"ram": 196608,
},
"d3en.2xlarge": {
"bandwidth": 25,
"disk": 56000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.2xlarge",
"instancesku": "PYH7JAKYH826PKNQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 14000 HDD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "d3en.2xlarge",
"name": "d3en.2xlarge",
"ram": 32768,
},
"d3en.4xlarge": {
"bandwidth": 25,
"disk": 112000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.4xlarge",
"instancesku": "V9KYSP3C455JHSEV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 14000 HDD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "d3en.4xlarge",
"name": "d3en.4xlarge",
"ram": 65536,
},
"d3en.6xlarge": {
"bandwidth": 40,
"disk": 168000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.6xlarge",
"instancesku": "CYYSUVKDC5DVFZCB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "40 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "12 x 14000 HDD",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "d3en.6xlarge",
"name": "d3en.6xlarge",
"ram": 98304,
},
"d3en.8xlarge": {
"bandwidth": 50,
"disk": 224000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.8xlarge",
"instancesku": "KC4ZMWJZMF9KCPSJ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "16 x 14000 HDD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "d3en.8xlarge",
"name": "d3en.8xlarge",
"ram": 131072,
},
"d3en.xlarge": {
"bandwidth": 25,
"disk": 28000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2800 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d3en.xlarge",
"instancesku": "9XAK7T9XR83GCZVV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 14000 HDD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "d3en.xlarge",
"name": "d3en.xlarge",
"ram": 16384,
},
"dl1.24xlarge": {
"bandwidth": 400,
"disk": 4000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "dl1.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "400 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 1000 GB NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "dl1.24xlarge",
"name": "dl1.24xlarge",
"ram": 786432,
},
"f1.16xlarge": {
"bandwidth": 25,
"disk": 3760,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "976 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 940 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "f1.16xlarge",
"name": "f1.16xlarge",
"ram": 999424,
},
"f1.2xlarge": {
"bandwidth": 10,
"disk": 470,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1700 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.2xlarge",
"instancesku": "MXDPYTEVHNYM6QTA",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 470 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "f1.2xlarge",
"name": "f1.2xlarge",
"ram": 124928,
},
"f1.4xlarge": {
"bandwidth": 10,
"disk": 940,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.4xlarge",
"instancesku": "6YUJAEPDDF47NZUQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 940 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "f1.4xlarge",
"name": "f1.4xlarge",
"ram": 249856,
},
"g2.2xlarge": {
"bandwidth": None,
"disk": 60,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g2.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 (Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 60 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "g2.2xlarge",
"name": "g2.2xlarge",
"ram": 15360,
},
"g2.8xlarge": {
"bandwidth": None,
"disk": 240,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "104",
"enhancedNetworkingSupported": "No",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g2.8xlarge",
"instancesku": "GP54RNQ37B9N43MJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "60 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 (Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 120 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g2.8xlarge",
"name": "g2.8xlarge",
"ram": 61440,
},
"g3.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g3.16xlarge",
"instancesku": "9JX6W8YKTGAXCFNB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "g3.16xlarge",
"name": "g3.16xlarge",
"ram": 499712,
},
"g3.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g3.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "g3.4xlarge",
"name": "g3.4xlarge",
"ram": 124928,
},
"g3.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "2",
"instanceFamily": "GPU instance",
"instanceType": "g3.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g3.8xlarge",
"name": "g3.8xlarge",
"ram": 249856,
},
"g3s.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "100 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g3s.xlarge",
"instancesku": "H33F2YFNF92GTBPB",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "g3s.xlarge",
"name": "g3s.xlarge",
"ram": 31232,
},
"g4ad.16xlarge": {
"bandwidth": 25,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g4ad.16xlarge",
"instancesku": "UTWC8VBRTBSXJYDB",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2400 GB NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "g4ad.16xlarge",
"name": "g4ad.16xlarge",
"ram": 262144,
},
"g4ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4ad.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "19.97394",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "300 GB NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "g4ad.2xlarge",
"name": "g4ad.2xlarge",
"ram": 32768,
},
"g4ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4ad.4xlarge",
"instancesku": "9VMBVH3KEV82T8ZW",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "600 GB NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "g4ad.4xlarge",
"name": "g4ad.4xlarge",
"ram": 65536,
},
"g4ad.8xlarge": {
"bandwidth": 15,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "2",
"instanceFamily": "GPU instance",
"instanceType": "g4ad.8xlarge",
"instancesku": "D6FUGEGN8P4Q7E97",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "15 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1200 GB NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g4ad.8xlarge",
"name": "g4ad.8xlarge",
"ram": 131072,
},
"g4ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4ad.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "13.97113",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "150 GB NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "g4ad.xlarge",
"name": "g4ad.xlarge",
"ram": 16384,
},
"g4dn.12xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.12xlarge",
"instancesku": "5GY7VGBKXNCWB5JE",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.12xlarge",
"name": "g4dn.12xlarge",
"ram": 196608,
},
"g4dn.16xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.16xlarge",
"name": "g4dn.16xlarge",
"ram": 262144,
},
"g4dn.2xlarge": {
"bandwidth": 25,
"disk": 225,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.2xlarge",
"instancesku": "K24CAS4K6W4HJXN8",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "225 GB NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.2xlarge",
"name": "g4dn.2xlarge",
"ram": 32768,
},
"g4dn.4xlarge": {
"bandwidth": 25,
"disk": 225,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.4xlarge",
"instancesku": "YMZGCYKDK8626BJT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "225 GB NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.4xlarge",
"name": "g4dn.4xlarge",
"ram": 65536,
},
"g4dn.8xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.8xlarge",
"instancesku": "AE5GUKCAC8GSBSBD",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.8xlarge",
"name": "g4dn.8xlarge",
"ram": 131072,
},
"g4dn.xlarge": {
"bandwidth": 25,
"disk": 125,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.xlarge",
"instancesku": "V5NBPKWB9EBYQAHY",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "125 GB NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "g4dn.xlarge",
"name": "g4dn.xlarge",
"ram": 16384,
},
"g5.12xlarge": {
"bandwidth": 40,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "16000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g5.12xlarge",
"instancesku": "3P84N5G49RGRCA2G",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "40 Gigabit",
"normalizationSizeFactor": "5.638170974",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 3800 GB NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "g5.12xlarge",
"name": "g5.12xlarge",
"ram": 196608,
},
"g5.16xlarge": {
"bandwidth": 25,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "16000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5.16xlarge",
"instancesku": "F3N4EHRDPFKVGZVK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "4.071570577",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 GB NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "g5.16xlarge",
"name": "g5.16xlarge",
"ram": 262144,
},
"g5.24xlarge": {
"bandwidth": 50,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g5.24xlarge",
"instancesku": "TWR57R4QNGWT3RVZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "8.095427435",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 3800 GB NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "g5.24xlarge",
"name": "g5.24xlarge",
"ram": 393216,
},
"g5.2xlarge": {
"bandwidth": 10,
"disk": 450,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5.2xlarge",
"instancesku": "G8UNADVF5KSHRNAY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "1.204771372",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 450 GB NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "g5.2xlarge",
"name": "g5.2xlarge",
"ram": 32768,
},
"g5.48xlarge": {
"bandwidth": 100,
"disk": 7600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "g5.48xlarge",
"instancesku": "J32K4AFZQCJURK5X",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "16.190854871",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 3800 GB NVMe SSD",
"vcpu": "192",
"vpcnetworkingsupport": "true",
},
"id": "g5.48xlarge",
"name": "g5.48xlarge",
"ram": 786432,
},
"g5.4xlarge": {
"bandwidth": 25,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "8000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5.4xlarge",
"instancesku": "UYEFWMH5HQA8HJEJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "1.614314115",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 600 GB NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "g5.4xlarge",
"name": "g5.4xlarge",
"ram": 65536,
},
"g5.8xlarge": {
"bandwidth": 25,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "16000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "2.433399602",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 900 GB NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g5.8xlarge",
"name": "g5.8xlarge",
"ram": 131072,
},
"g5.xlarge": {
"bandwidth": 10,
"disk": 250,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.8 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "1",
"physicalProcessor": "AMD EPYC 7R32",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 250 GB NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "g5.xlarge",
"name": "g5.xlarge",
"ram": 16384,
},
"g5g.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "2",
"instanceFamily": "GPU instance",
"instanceType": "g5g.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "6.533333333",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "g5g.16xlarge",
"name": "g5g.16xlarge",
"ram": 131072,
},
"g5g.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5g.2xlarge",
"instancesku": "BEKGBHKHHCV8FAHP",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "1.323809524",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "g5g.2xlarge",
"name": "g5g.2xlarge",
"ram": 16384,
},
"g5g.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5g.4xlarge",
"instancesku": "YH5UKC6MHJXTDWUT",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "1.971428571",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "g5g.4xlarge",
"name": "g5g.4xlarge",
"ram": 32768,
},
"g5g.8xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5g.8xlarge",
"instancesku": "R75EJCQK9ATE2ZEP",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 12 Gigabit",
"normalizationSizeFactor": "3.266666667",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "g5g.8xlarge",
"name": "g5g.8xlarge",
"ram": 65536,
},
"g5g.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g5g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "1",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "g5g.xlarge",
"name": "g5g.xlarge",
"ram": 8192,
},
"h1.16xlarge": {
"bandwidth": 25,
"disk": 16000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.16xlarge",
"instancesku": "42A6HHN38X5E4VKA",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 2000 HDD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "h1.16xlarge",
"name": "h1.16xlarge",
"ram": 262144,
},
"h1.2xlarge": {
"bandwidth": 10,
"disk": 2000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 2000 HDD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "h1.2xlarge",
"name": "h1.2xlarge",
"ram": 32768,
},
"h1.4xlarge": {
"bandwidth": 10,
"disk": 4000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.4xlarge",
"instancesku": "NGJJD4N26GD3FZRX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 2000 HDD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "h1.4xlarge",
"name": "h1.4xlarge",
"ram": 65536,
},
"h1.8xlarge": {
"bandwidth": 10,
"disk": 8000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.8xlarge",
"instancesku": "WH74JE96HBFW3TTP",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 2000 HDD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "h1.8xlarge",
"name": "h1.8xlarge",
"ram": 131072,
},
"hpc6a.48xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "hpc6a.48xlarge",
"instancesku": "SRS8RRSGRCT6JN7N",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "384",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "hpc6a.48xlarge",
"name": "hpc6a.48xlarge",
"ram": 393216,
},
"hs1.8xlarge": {
"bandwidth": 10,
"disk": 48000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2 GHz",
"currentGeneration": "No",
"ecu": "35",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Storage optimized",
"instanceType": "hs1.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "117 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2650",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 2000 HDD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "hs1.8xlarge",
"name": "hs1.8xlarge",
"ram": 119808,
},
"i2.2xlarge": {
"bandwidth": None,
"disk": 1600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "27",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 800 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "i2.2xlarge",
"name": "i2.2xlarge",
"ram": 62464,
},
"i2.4xlarge": {
"bandwidth": None,
"disk": 3200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "53",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.4xlarge",
"instancesku": "Y9Z5DVR4T6JJHPDH",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 800 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "i2.4xlarge",
"name": "i2.4xlarge",
"ram": 124928,
},
"i2.8xlarge": {
"bandwidth": 10,
"disk": 6400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "104",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 800 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "i2.8xlarge",
"name": "i2.8xlarge",
"ram": 249856,
},
"i2.large": {
"bandwidth": None,
"disk": 800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "7",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.large",
"instancesku": "DNMJX39QXYR342B4",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 800 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "i2.large",
"name": "i2.large",
"ram": 15360,
},
"i2.xlarge": {
"bandwidth": None,
"disk": 800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 800 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "i2.xlarge",
"name": "i2.xlarge",
"ram": 31232,
},
"i3.16xlarge": {
"bandwidth": 20,
"disk": 15200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "i3.16xlarge",
"name": "i3.16xlarge",
"ram": 499712,
},
"i3.2xlarge": {
"bandwidth": 10,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "i3.2xlarge",
"name": "i3.2xlarge",
"ram": 62464,
},
"i3.4xlarge": {
"bandwidth": 10,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.4xlarge",
"instancesku": "A4Y3Z2HDYDB9Y278",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "i3.4xlarge",
"name": "i3.4xlarge",
"ram": 124928,
},
"i3.8xlarge": {
"bandwidth": 10,
"disk": 7600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 1900 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "i3.8xlarge",
"name": "i3.8xlarge",
"ram": 249856,
},
"i3.large": {
"bandwidth": 10,
"disk": 475,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "425 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15.25 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "i3.large",
"name": "i3.large",
"ram": 15616,
},
"i3.xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "850 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.xlarge",
"instancesku": "DQBUA3WF2ZKB94XE",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "i3.xlarge",
"name": "i3.xlarge",
"ram": 31232,
},
"i3en.12xlarge": {
"bandwidth": 50,
"disk": 30000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.12xlarge",
"instancesku": "9JQFRDVY79XEF6QA",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 7500 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "i3en.12xlarge",
"name": "i3en.12xlarge",
"ram": 393216,
},
"i3en.24xlarge": {
"bandwidth": 100,
"disk": 60000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 7500 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "i3en.24xlarge",
"name": "i3en.24xlarge",
"ram": 786432,
},
"i3en.2xlarge": {
"bandwidth": 25,
"disk": 5000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 2500 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "i3en.2xlarge",
"name": "i3en.2xlarge",
"ram": 65536,
},
"i3en.3xlarge": {
"bandwidth": 25,
"disk": 7500,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.3xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 7500 NVMe SSD",
"vcpu": "12",
"vpcnetworkingsupport": "true",
},
"id": "i3en.3xlarge",
"name": "i3en.3xlarge",
"ram": 98304,
},
"i3en.6xlarge": {
"bandwidth": 25,
"disk": 15000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.6xlarge",
"instancesku": "JFUSUE6PEJ7BEWFW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 7500 NVMe SSD",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "i3en.6xlarge",
"name": "i3en.6xlarge",
"ram": 196608,
},
"i3en.large": {
"bandwidth": 25,
"disk": 1250,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.large",
"instancesku": "9KJJPTN45B7DSGAW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1250 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "i3en.large",
"name": "i3en.large",
"ram": 16384,
},
"i3en.xlarge": {
"bandwidth": 25,
"disk": 2500,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 2500 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "i3en.xlarge",
"name": "i3en.xlarge",
"ram": 32768,
},
"im4gn.16xlarge": {
"bandwidth": 100,
"disk": 30000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 7500 SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.16xlarge",
"name": "im4gn.16xlarge",
"ram": 262144,
},
"im4gn.2xlarge": {
"bandwidth": 25,
"disk": 3750,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 3750 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.2xlarge",
"name": "im4gn.2xlarge",
"ram": 32768,
},
"im4gn.4xlarge": {
"bandwidth": 25,
"disk": 7500,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.4xlarge",
"instancesku": "Z4AACXK82W7PMS2C",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 7500 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.4xlarge",
"name": "im4gn.4xlarge",
"ram": 65536,
},
"im4gn.8xlarge": {
"bandwidth": 50,
"disk": 15000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 7500 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.8xlarge",
"name": "im4gn.8xlarge",
"ram": 131072,
},
"im4gn.large": {
"bandwidth": 25,
"disk": 937,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 937 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.large",
"name": "im4gn.large",
"ram": 8192,
},
"im4gn.xlarge": {
"bandwidth": 25,
"disk": 1875,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "im4gn.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1875 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "im4gn.xlarge",
"name": "im4gn.xlarge",
"ram": 16384,
},
"inf1.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "inf1.24xlarge",
"name": "inf1.24xlarge",
"ram": 196608,
},
"inf1.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "875 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "inf1.2xlarge",
"name": "inf1.2xlarge",
"ram": 16384,
},
"inf1.6xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.6xlarge",
"instancesku": "JE9QJJXKVRAHGWSA",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "48 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "inf1.6xlarge",
"name": "inf1.6xlarge",
"ram": 49152,
},
"inf1.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "875 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "inf1.xlarge",
"name": "inf1.xlarge",
"ram": 8192,
},
"is4gen.2xlarge": {
"bandwidth": 25,
"disk": 7500,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.2xlarge",
"instancesku": "PYBA9A2K37U7ZG6W",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "48 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 7500 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.2xlarge",
"name": "is4gen.2xlarge",
"ram": 49152,
},
"is4gen.4xlarge": {
"bandwidth": 25,
"disk": 15000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.4xlarge",
"instancesku": "M3EBG2GMJV59A3WH",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 7500 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.4xlarge",
"name": "is4gen.4xlarge",
"ram": 98304,
},
"is4gen.8xlarge": {
"bandwidth": 50,
"disk": 30000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 7500 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.8xlarge",
"name": "is4gen.8xlarge",
"ram": 196608,
},
"is4gen.large": {
"bandwidth": 25,
"disk": 1875,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.large",
"instancesku": "RECJM4K3QDDW4JJA",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "12 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1875 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.large",
"name": "is4gen.large",
"ram": 12288,
},
"is4gen.medium": {
"bandwidth": 25,
"disk": 937,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "6 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 937 SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.medium",
"name": "is4gen.medium",
"ram": 6144,
},
"is4gen.xlarge": {
"bandwidth": 25,
"disk": 3750,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "is4gen.xlarge",
"instancesku": "5T6AZUVH62HV3BMG",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "24 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 3750 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "is4gen.xlarge",
"name": "is4gen.xlarge",
"ram": 24576,
},
"m1.large": {
"bandwidth": None,
"disk": 840,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "4",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 420 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m1.large",
"name": "m1.large",
"ram": 7680,
},
"m1.medium": {
"bandwidth": None,
"disk": 410,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "2",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 410 SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "m1.medium",
"name": "m1.medium",
"ram": 3840,
},
"m1.small": {
"bandwidth": None,
"disk": 160,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "1",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.small",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1.7 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 160 SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "m1.small",
"name": "m1.small",
"ram": 1740,
},
"m1.xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "8",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 420 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m1.xlarge",
"name": "m1.xlarge",
"ram": 15360,
},
"m2.2xlarge": {
"bandwidth": None,
"disk": 850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "13",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "m2.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "34.2 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 850 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m2.2xlarge",
"name": "m2.2xlarge",
"ram": 35020,
},
"m2.4xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "m2.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "68.4 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 840 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m2.4xlarge",
"name": "m2.4xlarge",
"ram": 70041,
},
"m2.xlarge": {
"bandwidth": None,
"disk": 420,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "m2.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "17.1 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 420 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m2.xlarge",
"name": "m2.xlarge",
"ram": 17510,
},
"m3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 80 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m3.2xlarge",
"name": "m3.2xlarge",
"ram": 30720,
},
"m3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 32 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m3.large",
"name": "m3.large",
"ram": 7680,
},
"m3.medium": {
"bandwidth": None,
"disk": 4,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "3",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 4 SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "m3.medium",
"name": "m3.medium",
"ram": 3840,
},
"m3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.xlarge",
"instancesku": "JCFYMXQEQ3BA57K3",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 40 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m3.xlarge",
"name": "m3.xlarge",
"ram": 15360,
},
"m4.10xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "124.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.10xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "160 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "80",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "40",
"vpcnetworkingsupport": "true",
},
"id": "m4.10xlarge",
"name": "m4.10xlarge",
"ram": 163840,
},
"m4.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.16xlarge",
"instancesku": "9RRW6TTJ2K77NSDQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m4.16xlarge",
"name": "m4.16xlarge",
"ram": 262144,
},
"m4.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "26",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.2xlarge",
"instancesku": "4DP9JW528SDT7HRV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m4.2xlarge",
"name": "m4.2xlarge",
"ram": 32768,
},
"m4.4xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "53.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.4xlarge",
"instancesku": "B5A2KQPVUB48X3ES",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m4.4xlarge",
"name": "m4.4xlarge",
"ram": 65536,
},
"m4.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "450 Mbps",
"ecu": "6.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m4.large",
"name": "m4.large",
"ram": 8192,
},
"m4.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m4.xlarge",
"name": "m4.xlarge",
"ram": 16384,
},
"m5.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.12xlarge",
"instancesku": "KKC6QHVHM678U3VX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5.12xlarge",
"name": "m5.12xlarge",
"ram": 196608,
},
"m5.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5.16xlarge",
"name": "m5.16xlarge",
"ram": 262144,
},
"m5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.24xlarge",
"instancesku": "SCEVK3MMHE5VFGJQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5.24xlarge",
"name": "m5.24xlarge",
"ram": 393216,
},
"m5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.2xlarge",
"instancesku": "7DHN2N5PWU2GRR8X",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5.2xlarge",
"name": "m5.2xlarge",
"ram": 32768,
},
"m5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5.4xlarge",
"name": "m5.4xlarge",
"ram": 65536,
},
"m5.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.8xlarge",
"instancesku": "87SQ6EDEM9K5VNZQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5.8xlarge",
"name": "m5.8xlarge",
"ram": 131072,
},
"m5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.large",
"instancesku": "82KMZB2MXBZ899J2",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5.large",
"name": "m5.large",
"ram": 8192,
},
"m5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5.xlarge",
"name": "m5.xlarge",
"ram": 16384,
},
"m5a.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5a.12xlarge",
"name": "m5a.12xlarge",
"ram": 196608,
},
"m5a.16xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5a.16xlarge",
"name": "m5a.16xlarge",
"ram": 262144,
},
"m5a.24xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5a.24xlarge",
"name": "m5a.24xlarge",
"ram": 393216,
},
"m5a.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.2xlarge",
"instancesku": "NP557QB7VFTC48QX",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5a.2xlarge",
"name": "m5a.2xlarge",
"ram": 32768,
},
"m5a.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.4xlarge",
"instancesku": "YRM7GSGQEF9HKS2Q",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5a.4xlarge",
"name": "m5a.4xlarge",
"ram": 65536,
},
"m5a.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5a.8xlarge",
"name": "m5a.8xlarge",
"ram": 131072,
},
"m5a.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.large",
"instancesku": "JP2HVYDRPYGCX6DK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5a.large",
"name": "m5a.large",
"ram": 8192,
},
"m5a.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5a.xlarge",
"name": "m5a.xlarge",
"ram": 16384,
},
"m5ad.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.12xlarge",
"name": "m5ad.12xlarge",
"ram": 196608,
},
"m5ad.16xlarge": {
"bandwidth": 12,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.16xlarge",
"name": "m5ad.16xlarge",
"ram": 262144,
},
"m5ad.24xlarge": {
"bandwidth": 20,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.24xlarge",
"instancesku": "R24J62ZT83FRAWE4",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.24xlarge",
"name": "m5ad.24xlarge",
"ram": 393216,
},
"m5ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.2xlarge",
"instancesku": "NC7JAPBAQR9N6AKC",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.2xlarge",
"name": "m5ad.2xlarge",
"ram": 32768,
},
"m5ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.4xlarge",
"instancesku": "B42N5BR67T9VQCUZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.4xlarge",
"name": "m5ad.4xlarge",
"ram": 65536,
},
"m5ad.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.8xlarge",
"name": "m5ad.8xlarge",
"ram": 131072,
},
"m5ad.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.large",
"name": "m5ad.large",
"ram": 8192,
},
"m5ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.xlarge",
"instancesku": "746NNG5CG6EEY9BJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5ad.xlarge",
"name": "m5ad.xlarge",
"ram": 16384,
},
"m5d.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.12xlarge",
"instancesku": "UBWKCRY37QGF892F",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5d.12xlarge",
"name": "m5d.12xlarge",
"ram": 196608,
},
"m5d.16xlarge": {
"bandwidth": 20,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.16xlarge",
"instancesku": "2AW5G4CMNCDS44AT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5d.16xlarge",
"name": "m5d.16xlarge",
"ram": 262144,
},
"m5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5d.24xlarge",
"name": "m5d.24xlarge",
"ram": 393216,
},
"m5d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5d.2xlarge",
"name": "m5d.2xlarge",
"ram": 32768,
},
"m5d.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.4xlarge",
"instancesku": "XHECG69V2ZSYT8UV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5d.4xlarge",
"name": "m5d.4xlarge",
"ram": 65536,
},
"m5d.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.8xlarge",
"instancesku": "2C2YGPB7B4MPSHV7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5d.8xlarge",
"name": "m5d.8xlarge",
"ram": 131072,
},
"m5d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.large",
"instancesku": "GJE7QRBNDAYYEUHG",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5d.large",
"name": "m5d.large",
"ram": 8192,
},
"m5d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.xlarge",
"instancesku": "GKEVTCW4GXTX52AW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5d.xlarge",
"name": "m5d.xlarge",
"ram": 16384,
},
"m5dn.12xlarge": {
"bandwidth": 50,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 GB NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.12xlarge",
"name": "m5dn.12xlarge",
"ram": 196608,
},
"m5dn.16xlarge": {
"bandwidth": 75,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.16xlarge",
"name": "m5dn.16xlarge",
"ram": 262144,
},
"m5dn.24xlarge": {
"bandwidth": 100,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.24xlarge",
"instancesku": "G9H536RMY737GEM4",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.24xlarge",
"name": "m5dn.24xlarge",
"ram": 393216,
},
"m5dn.2xlarge": {
"bandwidth": 25,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.2xlarge",
"instancesku": "3WZA3VEJ8QD5UJBE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.2xlarge",
"name": "m5dn.2xlarge",
"ram": 32768,
},
"m5dn.4xlarge": {
"bandwidth": 25,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.4xlarge",
"instancesku": "3Y5Q72YU4JNPU6S5",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.4xlarge",
"name": "m5dn.4xlarge",
"ram": 65536,
},
"m5dn.8xlarge": {
"bandwidth": 25,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.8xlarge",
"instancesku": "N53VJWC86E4NYNWY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.8xlarge",
"name": "m5dn.8xlarge",
"ram": 131072,
},
"m5dn.large": {
"bandwidth": 25,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.large",
"instancesku": "SVUJNCR3NN2359RD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.large",
"name": "m5dn.large",
"ram": 8192,
},
"m5dn.xlarge": {
"bandwidth": 25,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5dn.xlarge",
"name": "m5dn.xlarge",
"ram": 16384,
},
"m5n.12xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.12xlarge",
"instancesku": "BEXX757YG98NCW3T",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5n.12xlarge",
"name": "m5n.12xlarge",
"ram": 196608,
},
"m5n.16xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.16xlarge",
"instancesku": "88HNSFX9MCY6YAC3",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m5n.16xlarge",
"name": "m5n.16xlarge",
"ram": 262144,
},
"m5n.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m5n.24xlarge",
"name": "m5n.24xlarge",
"ram": 393216,
},
"m5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5n.2xlarge",
"name": "m5n.2xlarge",
"ram": 32768,
},
"m5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.4xlarge",
"instancesku": "ZH4K42CV65578MRJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m5n.4xlarge",
"name": "m5n.4xlarge",
"ram": 65536,
},
"m5n.8xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.8xlarge",
"instancesku": "B9H9FBY9ZKWXQT4E",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m5n.8xlarge",
"name": "m5n.8xlarge",
"ram": 131072,
},
"m5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5n.large",
"name": "m5n.large",
"ram": 8192,
},
"m5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5n.xlarge",
"name": "m5n.xlarge",
"ram": 16384,
},
"m5zn.12xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.12xlarge",
"instancesku": "USMQJ4AAYGRVZ5AP",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.12xlarge",
"name": "m5zn.12xlarge",
"ram": 196608,
},
"m5zn.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.2xlarge",
"instancesku": "AKY928WUU3BEMP5B",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.2xlarge",
"name": "m5zn.2xlarge",
"ram": 32768,
},
"m5zn.3xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.3xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "48 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "12",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.3xlarge",
"name": "m5zn.3xlarge",
"ram": 49152,
},
"m5zn.6xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.6xlarge",
"instancesku": "UKFGCB9AHS4FY7V6",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.6xlarge",
"name": "m5zn.6xlarge",
"ram": 98304,
},
"m5zn.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.large",
"name": "m5zn.large",
"ram": 8192,
},
"m5zn.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5zn.xlarge",
"instancesku": "Z5KETQXBVZT4M8PQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m5zn.xlarge",
"name": "m5zn.xlarge",
"ram": 16384,
},
"m6a.12xlarge": {
"bandwidth": 18750,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.12xlarge",
"instancesku": "YKE5F4C8QZ6A93TT",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "18750 Megabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m6a.12xlarge",
"name": "m6a.12xlarge",
"ram": 196608,
},
"m6a.16xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13333 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m6a.16xlarge",
"name": "m6a.16xlarge",
"ram": 262144,
},
"m6a.24xlarge": {
"bandwidth": 37500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.24xlarge",
"instancesku": "RFDGG9TMUYCGHU7A",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "37500 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m6a.24xlarge",
"name": "m6a.24xlarge",
"ram": 393216,
},
"m6a.2xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.2xlarge",
"instancesku": "QSNDQNYATNQBJTEP",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m6a.2xlarge",
"name": "m6a.2xlarge",
"ram": 32768,
},
"m6a.32xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "26667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.32xlarge",
"instancesku": "QMYPDZTD3PT7XZ36",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "m6a.32xlarge",
"name": "m6a.32xlarge",
"ram": 524288,
},
"m6a.48xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.48xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "384",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "192",
"vpcnetworkingsupport": "true",
},
"id": "m6a.48xlarge",
"name": "m6a.48xlarge",
"ram": 786432,
},
"m6a.4xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.4xlarge",
"instancesku": "F4F8C3C86FW24773",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m6a.4xlarge",
"name": "m6a.4xlarge",
"ram": 65536,
},
"m6a.8xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.8xlarge",
"instancesku": "TP4UNYCCECX5SY5U",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "12500 Megabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m6a.8xlarge",
"name": "m6a.8xlarge",
"ram": 131072,
},
"m6a.large": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.large",
"instancesku": "JDDBHHJ5RWUNBQUG",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m6a.large",
"name": "m6a.large",
"ram": 8192,
},
"m6a.xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.95 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 6667 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6a.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7R13 Processor",
"processorArchitecture": "64-bit",
"processorFeatures": "AMD Turbo; AVX; AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m6a.xlarge",
"name": "m6a.xlarge",
"ram": 16384,
},
"m6g.12xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.12xlarge",
"instancesku": "QD2WPXWQ5TSZ32UD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m6g.12xlarge",
"name": "m6g.12xlarge",
"ram": 196608,
},
"m6g.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m6g.16xlarge",
"name": "m6g.16xlarge",
"ram": 262144,
},
"m6g.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2250 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.2xlarge",
"instancesku": "SX3J4P5RPTH53ZTN",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m6g.2xlarge",
"name": "m6g.2xlarge",
"ram": 32768,
},
"m6g.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m6g.4xlarge",
"name": "m6g.4xlarge",
"ram": 65536,
},
"m6g.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m6g.8xlarge",
"name": "m6g.8xlarge",
"ram": 131072,
},
"m6g.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "600 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m6g.large",
"name": "m6g.large",
"ram": 8192,
},
"m6g.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "300 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "m6g.medium",
"name": "m6g.medium",
"ram": 4096,
},
"m6g.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1125 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m6g.xlarge",
"name": "m6g.xlarge",
"ram": 16384,
},
"m6gd.12xlarge": {
"bandwidth": 20,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.12xlarge",
"name": "m6gd.12xlarge",
"ram": 196608,
},
"m6gd.16xlarge": {
"bandwidth": 25,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.16xlarge",
"name": "m6gd.16xlarge",
"ram": 262144,
},
"m6gd.2xlarge": {
"bandwidth": 10,
"disk": 475,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.2xlarge",
"instancesku": "MEPDU66RSS2VZTBP",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.2xlarge",
"name": "m6gd.2xlarge",
"ram": 32768,
},
"m6gd.4xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.4xlarge",
"instancesku": "KPBAA5MV8872F65A",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.4xlarge",
"name": "m6gd.4xlarge",
"ram": 65536,
},
"m6gd.8xlarge": {
"bandwidth": 10,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.8xlarge",
"instancesku": "HKW2CNJU63NTP3VD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.8xlarge",
"name": "m6gd.8xlarge",
"ram": 131072,
},
"m6gd.large": {
"bandwidth": 10,
"disk": 118,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 118 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.large",
"name": "m6gd.large",
"ram": 8192,
},
"m6gd.medium": {
"bandwidth": 10,
"disk": 59,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.medium",
"instancesku": "F82GDBMZZ9YEY6AD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 59 NVMe SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.medium",
"name": "m6gd.medium",
"ram": 4096,
},
"m6gd.xlarge": {
"bandwidth": 10,
"disk": 237,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6gd.xlarge",
"instancesku": "2EN2S7FJ96KP53AE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 237 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m6gd.xlarge",
"name": "m6gd.xlarge",
"ram": 16384,
},
"m6i.12xlarge": {
"bandwidth": 18750,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "15000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.12xlarge",
"instancesku": "XWT95UWVSUQJ3HFY",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "18750 Megabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "m6i.12xlarge",
"name": "m6i.12xlarge",
"ram": 196608,
},
"m6i.16xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.16xlarge",
"instancesku": "FU3UWJAQS57D8DEW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "m6i.16xlarge",
"name": "m6i.16xlarge",
"ram": 262144,
},
"m6i.24xlarge": {
"bandwidth": 37500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "30000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "37500 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "m6i.24xlarge",
"name": "m6i.24xlarge",
"ram": 393216,
},
"m6i.2xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "m6i.2xlarge",
"name": "m6i.2xlarge",
"ram": 32768,
},
"m6i.32xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.32xlarge",
"instancesku": "J2589GXE2X4QGE87",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "m6i.32xlarge",
"name": "m6i.32xlarge",
"ram": 524288,
},
"m6i.4xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "m6i.4xlarge",
"name": "m6i.4xlarge",
"ram": 65536,
},
"m6i.8xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "12500 Megabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "m6i.8xlarge",
"name": "m6i.8xlarge",
"ram": 131072,
},
"m6i.large": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.large",
"instancesku": "3QKHM8YZH37EMM3S",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "m6i.large",
"name": "m6i.large",
"ram": 8192,
},
"m6i.xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m6i.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "m6i.xlarge",
"name": "m6i.xlarge",
"ram": 16384,
},
"p2.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "16",
"instanceFamily": "GPU instance",
"instanceType": "p2.16xlarge",
"instancesku": "8NZJVJPUUPFQD5XT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "732 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "p2.16xlarge",
"name": "p2.16xlarge",
"ram": 749568,
},
"p2.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p2.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "p2.8xlarge",
"name": "p2.8xlarge",
"ram": 499712,
},
"p2.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "p2.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "p2.xlarge",
"name": "p2.xlarge",
"ram": 62464,
},
"p3.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p3.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "p3.16xlarge",
"name": "p3.16xlarge",
"ram": 499712,
},
"p3.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "p3.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "p3.2xlarge",
"name": "p3.2xlarge",
"ram": 62464,
},
"p3.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "p3.8xlarge",
"instancesku": "HCAHXPK7A9MBBZGN",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "p3.8xlarge",
"name": "p3.8xlarge",
"ram": 249856,
},
"p3dn.24xlarge": {
"bandwidth": 100,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p3dn.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "p3dn.24xlarge",
"name": "p3dn.24xlarge",
"ram": 786432,
},
"p4d.24xlarge": {
"bandwidth": 400,
"disk": 8000,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "345",
"enhancedNetworkingSupported": "No",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p4d.24xlarge",
"instancesku": "CASSTWDHBMWC5PPU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1152 GiB",
"networkPerformance": "400 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 1000 SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "p4d.24xlarge",
"name": "p4d.24xlarge",
"ram": 1179648,
},
"r3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 160 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r3.2xlarge",
"name": "r3.2xlarge",
"ram": 62464,
},
"r3.4xlarge": {
"bandwidth": None,
"disk": 320,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "52",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 320 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r3.4xlarge",
"name": "r3.4xlarge",
"ram": 124928,
},
"r3.8xlarge": {
"bandwidth": 10,
"disk": 640,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "104",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 320 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r3.8xlarge",
"name": "r3.8xlarge",
"ram": 249856,
},
"r3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.large",
"instancesku": "JXRNUMXAZNYRPC72",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15.25 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 32 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r3.large",
"name": "r3.large",
"ram": 15616,
},
"r3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "true",
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.xlarge",
"instancesku": "XKREJETCK4Q363EE",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 80 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r3.xlarge",
"name": "r3.xlarge",
"ram": 31232,
},
"r4.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r4.16xlarge",
"name": "r4.16xlarge",
"ram": 499712,
},
"r4.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1600 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r4.2xlarge",
"name": "r4.2xlarge",
"ram": 62464,
},
"r4.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3000 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.4xlarge",
"instancesku": "C9S5QU5S5T9EP9E3",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r4.4xlarge",
"name": "r4.4xlarge",
"ram": 124928,
},
"r4.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r4.8xlarge",
"name": "r4.8xlarge",
"ram": 249856,
},
"r4.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "400 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.large",
"instancesku": "UR8ANZE227XY35VC",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "15.25 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r4.large",
"name": "r4.large",
"ram": 15616,
},
"r4.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "800 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.xlarge",
"instancesku": "YTUYTGV9WJQD2NAN",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "30.5 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r4.xlarge",
"name": "r4.xlarge",
"ram": 31232,
},
"r5.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5.12xlarge",
"name": "r5.12xlarge",
"ram": 393216,
},
"r5.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13600 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.16xlarge",
"instancesku": "Z2E8HZWVPEUN65QE",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5.16xlarge",
"name": "r5.16xlarge",
"ram": 524288,
},
"r5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5.24xlarge",
"name": "r5.24xlarge",
"ram": 786432,
},
"r5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.2xlarge",
"instancesku": "D5AEEW57JF2DX3U9",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5.2xlarge",
"name": "r5.2xlarge",
"ram": 65536,
},
"r5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5.4xlarge",
"name": "r5.4xlarge",
"ram": 131072,
},
"r5.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6800 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.8xlarge",
"instancesku": "PW6P8Z944EA4HYFQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5.8xlarge",
"name": "r5.8xlarge",
"ram": 262144,
},
"r5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5.large",
"name": "r5.large",
"ram": 16384,
},
"r5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "19",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5.xlarge",
"name": "r5.xlarge",
"ram": 32768,
},
"r5a.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5a.12xlarge",
"name": "r5a.12xlarge",
"ram": 393216,
},
"r5a.16xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.16xlarge",
"instancesku": "2B3VAYDSHV6M39S6",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5a.16xlarge",
"name": "r5a.16xlarge",
"ram": 524288,
},
"r5a.24xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.24xlarge",
"instancesku": "YG2S4S8SBWEBG9X7",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5a.24xlarge",
"name": "r5a.24xlarge",
"ram": 786432,
},
"r5a.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5a.2xlarge",
"name": "r5a.2xlarge",
"ram": 65536,
},
"r5a.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.4xlarge",
"instancesku": "9USH93WEV7YHKN3N",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5a.4xlarge",
"name": "r5a.4xlarge",
"ram": 131072,
},
"r5a.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5a.8xlarge",
"name": "r5a.8xlarge",
"ram": 262144,
},
"r5a.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.large",
"instancesku": "HXFJ8J7KRRDCUQDF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5a.large",
"name": "r5a.large",
"ram": 16384,
},
"r5a.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5a.xlarge",
"name": "r5a.xlarge",
"ram": 32768,
},
"r5ad.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.12xlarge",
"instancesku": "BPGWTSU35274F5JD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.12xlarge",
"name": "r5ad.12xlarge",
"ram": 393216,
},
"r5ad.16xlarge": {
"bandwidth": 12,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.16xlarge",
"instancesku": "H8NDT2T8J8KSRGRD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.16xlarge",
"name": "r5ad.16xlarge",
"ram": 524288,
},
"r5ad.24xlarge": {
"bandwidth": 20,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.24xlarge",
"instancesku": "AYARQMMWY3KRT3Z8",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.24xlarge",
"name": "r5ad.24xlarge",
"ram": 786432,
},
"r5ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.2xlarge",
"instancesku": "ZR4V2JBYXNCCZXR3",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.2xlarge",
"name": "r5ad.2xlarge",
"ram": 65536,
},
"r5ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.4xlarge",
"name": "r5ad.4xlarge",
"ram": 131072,
},
"r5ad.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.8xlarge",
"instancesku": "KKTZFU3YAWTUZQEF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.8xlarge",
"name": "r5ad.8xlarge",
"ram": 262144,
},
"r5ad.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.large",
"instancesku": "NK72MQAHKR4TXF5P",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.large",
"name": "r5ad.large",
"ram": 16384,
},
"r5ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.xlarge",
"instancesku": "HZJ8V6JGDWGUM5QF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5ad.xlarge",
"name": "r5ad.xlarge",
"ram": 32768,
},
"r5b.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "30 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.12xlarge",
"instancesku": "7ZXNNN5Z4DMKZ66A",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5b.12xlarge",
"name": "r5b.12xlarge",
"ram": 393216,
},
"r5b.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.16xlarge",
"instancesku": "6U7DBQWKEUPM26YD",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5b.16xlarge",
"name": "r5b.16xlarge",
"ram": 524288,
},
"r5b.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "60 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.24xlarge",
"instancesku": "PDJDMA2YH5M6XBP6",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5b.24xlarge",
"name": "r5b.24xlarge",
"ram": 786432,
},
"r5b.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 12.5 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5b.2xlarge",
"name": "r5b.2xlarge",
"ram": 65536,
},
"r5b.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.4xlarge",
"instancesku": "SA38FHJSUCZ3P2Z7",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5b.4xlarge",
"name": "r5b.4xlarge",
"ram": 131072,
},
"r5b.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5b.8xlarge",
"name": "r5b.8xlarge",
"ram": 262144,
},
"r5b.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.large",
"instancesku": "P8BAP4U25KDKYSE7",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5b.large",
"name": "r5b.large",
"ram": 16384,
},
"r5b.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5b.xlarge",
"instancesku": "4ZC3KAXZFYGWRNCX",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5b.xlarge",
"name": "r5b.xlarge",
"ram": 32768,
},
"r5d.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.12xlarge",
"instancesku": "JD26ZJK92H6X64TZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-north-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5d.12xlarge",
"name": "r5d.12xlarge",
"ram": 393216,
},
"r5d.16xlarge": {
"bandwidth": 20,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.16xlarge",
"instancesku": "XJ5CFKTMG2X7XGD7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5d.16xlarge",
"name": "r5d.16xlarge",
"ram": 524288,
},
"r5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5d.24xlarge",
"name": "r5d.24xlarge",
"ram": 786432,
},
"r5d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.2xlarge",
"instancesku": "75EXHEYU7R3CEX8D",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5d.2xlarge",
"name": "r5d.2xlarge",
"ram": 65536,
},
"r5d.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.4xlarge",
"instancesku": "XV8MVPMGEYQC3PX7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5d.4xlarge",
"name": "r5d.4xlarge",
"ram": 131072,
},
"r5d.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.8xlarge",
"instancesku": "QCJ53WA9MJMS6BDC",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5d.8xlarge",
"name": "r5d.8xlarge",
"ram": 262144,
},
"r5d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.large",
"instancesku": "HE833VNWFWBDRWT4",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5d.large",
"name": "r5d.large",
"ram": 16384,
},
"r5d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "19",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.xlarge",
"instancesku": "2NUFS4WQ7FCZY3CX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5d.xlarge",
"name": "r5d.xlarge",
"ram": 32768,
},
"r5dn.12xlarge": {
"bandwidth": 50,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.12xlarge",
"instancesku": "KP8X3ZRFBR5BE2QG",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 GB NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.12xlarge",
"name": "r5dn.12xlarge",
"ram": 393216,
},
"r5dn.16xlarge": {
"bandwidth": 75,
"disk": 2400,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.16xlarge",
"instancesku": "9CC8VC9DH37UZAWT",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.16xlarge",
"name": "r5dn.16xlarge",
"ram": 524288,
},
"r5dn.24xlarge": {
"bandwidth": 100,
"disk": 3600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.24xlarge",
"instancesku": "8PAUA987J4Q8MTRH",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.24xlarge",
"name": "r5dn.24xlarge",
"ram": 786432,
},
"r5dn.2xlarge": {
"bandwidth": 25,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.2xlarge",
"instancesku": "5TFDK9PHN5A9RS5A",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.2xlarge",
"name": "r5dn.2xlarge",
"ram": 65536,
},
"r5dn.4xlarge": {
"bandwidth": 25,
"disk": 600,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.4xlarge",
"name": "r5dn.4xlarge",
"ram": 131072,
},
"r5dn.8xlarge": {
"bandwidth": 25,
"disk": 1200,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.8xlarge",
"name": "r5dn.8xlarge",
"ram": 262144,
},
"r5dn.large": {
"bandwidth": 25,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.large",
"instancesku": "K4T5UGP4HDGJPFH7",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.large",
"name": "r5dn.large",
"ram": 16384,
},
"r5dn.xlarge": {
"bandwidth": 25,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.xlarge",
"instancesku": "PTAK6AMJWZ33TRF6",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5dn.xlarge",
"name": "r5dn.xlarge",
"ram": 32768,
},
"r5n.12xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r5n.12xlarge",
"name": "r5n.12xlarge",
"ram": 393216,
},
"r5n.16xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r5n.16xlarge",
"name": "r5n.16xlarge",
"ram": 524288,
},
"r5n.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r5n.24xlarge",
"name": "r5n.24xlarge",
"ram": 786432,
},
"r5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r5n.2xlarge",
"name": "r5n.2xlarge",
"ram": 65536,
},
"r5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.4xlarge",
"instancesku": "F6MSS2DTVUQR6VVK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r5n.4xlarge",
"name": "r5n.4xlarge",
"ram": 131072,
},
"r5n.8xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.8xlarge",
"instancesku": "RM86NRBPZ5CDPPRU",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r5n.8xlarge",
"name": "r5n.8xlarge",
"ram": 262144,
},
"r5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.large",
"instancesku": "6WD6N47RFBKDZDF8",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r5n.large",
"name": "r5n.large",
"ram": 16384,
},
"r5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r5n.xlarge",
"name": "r5n.xlarge",
"ram": 32768,
},
"r6g.12xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r6g.12xlarge",
"name": "r6g.12xlarge",
"ram": 393216,
},
"r6g.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.16xlarge",
"instancesku": "6TECJVQ38AJEB8QM",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r6g.16xlarge",
"name": "r6g.16xlarge",
"ram": 524288,
},
"r6g.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.2xlarge",
"instancesku": "G7GDESM5VCN79HV9",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r6g.2xlarge",
"name": "r6g.2xlarge",
"ram": 65536,
},
"r6g.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.4xlarge",
"instancesku": "AWVHFPP2ZCKU6H6Y",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r6g.4xlarge",
"name": "r6g.4xlarge",
"ram": 131072,
},
"r6g.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.8xlarge",
"instancesku": "JU3XS3CSJVG9PG6T",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r6g.8xlarge",
"name": "r6g.8xlarge",
"ram": 262144,
},
"r6g.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.large",
"instancesku": "3F7S3UNTHN3MP78B",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r6g.large",
"name": "r6g.large",
"ram": 16384,
},
"r6g.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.medium",
"instancesku": "94RYVQHX4TWDPAUG",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "r6g.medium",
"name": "r6g.medium",
"ram": 8192,
},
"r6g.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r6g.xlarge",
"name": "r6g.xlarge",
"ram": 32768,
},
"r6gd.12xlarge": {
"bandwidth": 20,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "13500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.12xlarge",
"instancesku": "AB7MXUVQDAHCPG8U",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.12xlarge",
"name": "r6gd.12xlarge",
"ram": 393216,
},
"r6gd.16xlarge": {
"bandwidth": 25,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.16xlarge",
"name": "r6gd.16xlarge",
"ram": 524288,
},
"r6gd.2xlarge": {
"bandwidth": 10,
"disk": 475,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.2xlarge",
"instancesku": "S7R4T7ZX2D6V4MVY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.2xlarge",
"name": "r6gd.2xlarge",
"ram": 65536,
},
"r6gd.4xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.4xlarge",
"instancesku": "ZRRABC3BGW9ADYVP",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.4xlarge",
"name": "r6gd.4xlarge",
"ram": 131072,
},
"r6gd.8xlarge": {
"bandwidth": 10,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.8xlarge",
"instancesku": "29TFCYKSEGWR8SU4",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.8xlarge",
"name": "r6gd.8xlarge",
"ram": 262144,
},
"r6gd.large": {
"bandwidth": 10,
"disk": 118,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.large",
"instancesku": "X9MV6RERD7HKEZ65",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 118 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.large",
"name": "r6gd.large",
"ram": 16384,
},
"r6gd.medium": {
"bandwidth": 10,
"disk": 59,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.medium",
"instancesku": "6UT3UMQBXYMNWFKU",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 59 NVMe SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.medium",
"name": "r6gd.medium",
"ram": 8192,
},
"r6gd.xlarge": {
"bandwidth": 10,
"disk": 237,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6gd.xlarge",
"instancesku": "PT8VHX8W2UHY4NRQ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 237 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r6gd.xlarge",
"name": "r6gd.xlarge",
"ram": 32768,
},
"r6i.12xlarge": {
"bandwidth": 18750,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "15000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "18750 Megabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "r6i.12xlarge",
"name": "r6i.12xlarge",
"ram": 393216,
},
"r6i.16xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "r6i.16xlarge",
"name": "r6i.16xlarge",
"ram": 524288,
},
"r6i.24xlarge": {
"bandwidth": 37500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "30000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "37500 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "r6i.24xlarge",
"name": "r6i.24xlarge",
"ram": 786432,
},
"r6i.2xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.2xlarge",
"instancesku": "GCMURA7T4XN4SCDF",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "r6i.2xlarge",
"name": "r6i.2xlarge",
"ram": 65536,
},
"r6i.32xlarge": {
"bandwidth": 50000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.32xlarge",
"instancesku": "NAXJMGJV4T3ATM9C",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1024 GiB",
"networkPerformance": "50000 Megabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "r6i.32xlarge",
"name": "r6i.32xlarge",
"ram": 1048576,
},
"r6i.4xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.4xlarge",
"instancesku": "GWXAMKAUW5XHZMA3",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "r6i.4xlarge",
"name": "r6i.4xlarge",
"ram": 131072,
},
"r6i.8xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.8xlarge",
"instancesku": "KY8HJM2YQXMC39AD",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "12500 Megabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "r6i.8xlarge",
"name": "r6i.8xlarge",
"ram": 262144,
},
"r6i.large": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.large",
"instancesku": "ZRKDCCCDPT6THG8V",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "r6i.large",
"name": "r6i.large",
"ram": 16384,
},
"r6i.xlarge": {
"bandwidth": 12500,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r6i.xlarge",
"instancesku": "X5U3U87FSV7HP5DF",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 12500 Megabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon 8375C (Ice Lake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "r6i.xlarge",
"name": "r6i.xlarge",
"ram": 32768,
},
"t1.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "No",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Micro instances",
"instanceType": "t1.micro",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "0.613 GiB",
"networkPerformance": "Very Low",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Variable",
"processorArchitecture": "32-bit or 64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "t1.micro",
"name": "t1.micro",
"ram": 627,
},
"t2.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.2xlarge",
"instancesku": "6BRS5QCYDYJMYCM8",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "t2.2xlarge",
"name": "t2.2xlarge",
"ram": 32768,
},
"t2.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.large",
"instancesku": "PAKV95ZHG77BFWBG",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t2.large",
"name": "t2.large",
"ram": 8192,
},
"t2.medium": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.medium",
"instancesku": "DY99AWFYB6SX74KD",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t2.medium",
"name": "t2.medium",
"ram": 4096,
},
"t2.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.micro",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "eu-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "t2.micro",
"name": "t2.micro",
"ram": 1024,
},
"t2.nano": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.nano",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "0.5 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "t2.nano",
"name": "t2.nano",
"ram": 512,
},
"t2.small": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.small",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "t2.small",
"name": "t2.small",
"ram": 2048,
},
"t2.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "t2.xlarge",
"name": "t2.xlarge",
"ram": 16384,
},
"t3.2xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.2xlarge",
"instancesku": "CAB4TUSN8KCWWMKW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-3",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "t3.2xlarge",
"name": "t3.2xlarge",
"ram": 32768,
},
"t3.large": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3.large",
"name": "t3.large",
"ram": 8192,
},
"t3.medium": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.medium",
"instancesku": "YZ5V7FVMR7UMXETK",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3.medium",
"name": "t3.medium",
"ram": 4096,
},
"t3.micro": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.micro",
"instancesku": "VJNS2RY9CFHRVCMH",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3.micro",
"name": "t3.micro",
"ram": 1024,
},
"t3.nano": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.nano",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "0.5 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3.nano",
"name": "t3.nano",
"ram": 512,
},
"t3.small": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.small",
"instancesku": "F77PBHT47MJ9J4UZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3.small",
"name": "t3.small",
"ram": 2048,
},
"t3.xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.xlarge",
"instancesku": "X6S8KH4ZJATAS9WQ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "t3.xlarge",
"name": "t3.xlarge",
"ram": 16384,
},
"t3a.2xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.2xlarge",
"instancesku": "XJ5J37BQ39MMA7N8",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "t3a.2xlarge",
"name": "t3a.2xlarge",
"ram": 32768,
},
"t3a.large": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.large",
"instancesku": "AD37JQR5HHVRCCGH",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3a.large",
"name": "t3a.large",
"ram": 8192,
},
"t3a.medium": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.medium",
"instancesku": "GQT58B965R2X62MH",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "us-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3a.medium",
"name": "t3a.medium",
"ram": 4096,
},
"t3a.micro": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.micro",
"instancesku": "QVK78M9BTVPADRF8",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3a.micro",
"name": "t3a.micro",
"ram": 1024,
},
"t3a.nano": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.nano",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "0.5 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3a.nano",
"name": "t3a.nano",
"ram": 512,
},
"t3a.small": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.small",
"instancesku": "MCQ7R7JB2DPXEKET",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "1",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t3a.small",
"name": "t3a.small",
"ram": 2048,
},
"t3a.xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.xlarge",
"instancesku": "YUQH22HS49VU8AJK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "t3a.xlarge",
"name": "t3a.xlarge",
"ram": 16384,
},
"t4g.2xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "t4g.2xlarge",
"name": "t4g.2xlarge",
"ram": 32768,
},
"t4g.large": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.large",
"instancesku": "8N9PPA2VHETNFJ6J",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "8 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t4g.large",
"name": "t4g.large",
"ram": 8192,
},
"t4g.medium": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "4 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t4g.medium",
"name": "t4g.medium",
"ram": 4096,
},
"t4g.micro": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.micro",
"instancesku": "S2M8BS97FGPUHMN7",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "sa-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t4g.micro",
"name": "t4g.micro",
"ram": 1024,
},
"t4g.nano": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.nano",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "0.5 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t4g.nano",
"name": "t4g.nano",
"ram": 512,
},
"t4g.small": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2085 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.small",
"instancesku": "Y33CHC2PYKBQ7U2C",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "2 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "1",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "t4g.small",
"name": "t4g.small",
"ram": 2048,
},
"t4g.xlarge": {
"bandwidth": 5,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2780 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t4g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 5 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "t4g.xlarge",
"name": "t4g.xlarge",
"ram": 16384,
},
"u-12tb1.112xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "u-12tb1.112xlarge",
"instancesku": "496SWG6C9RT9DJEH",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "12288 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Scalable (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "448",
"vpcnetworkingsupport": "true",
},
"id": "u-12tb1.112xlarge",
"name": "u-12tb1.112xlarge",
"ram": 12582912,
},
"u-3tb1.56xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "u-3tb1.56xlarge",
"instancesku": "SAPP6Y5NAHGYJNTE",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "3072 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "448",
"physicalProcessor": "Intel Xeon Scalable (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX2; Intel AVX; Intel AVX2",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "224",
"vpcnetworkingsupport": "true",
},
"id": "u-3tb1.56xlarge",
"name": "u-3tb1.56xlarge",
"ram": 3145728,
},
"u-6tb1.112xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "u-6tb1.112xlarge",
"instancesku": "MTWR7MAD77AF9SWK",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "6144 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Scalable (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; Intel AVX; Intel AVX2",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "448",
"vpcnetworkingsupport": "true",
},
"id": "u-6tb1.112xlarge",
"name": "u-6tb1.112xlarge",
"ram": 6291456,
},
"u-6tb1.56xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "u-6tb1.56xlarge",
"instancesku": "QY362HX289UBKXNK",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "6144 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Scalable (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; Intel AVX; Intel AVX2",
"regionCode": "eu-south-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "224",
"vpcnetworkingsupport": "true",
},
"id": "u-6tb1.56xlarge",
"name": "u-6tb1.56xlarge",
"ram": 6291456,
},
"u-9tb1.112xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "38000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "u-9tb1.112xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "9216 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Scalable (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "448",
"vpcnetworkingsupport": "true",
},
"id": "u-9tb1.112xlarge",
"name": "u-9tb1.112xlarge",
"ram": 9437184,
},
"vt1.24xlarge": {
"bandwidth": 25000,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Media Accelerator Instances",
"instanceType": "vt1.24xlarge",
"instancesku": "ADK6CBZ878Z8CJQT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "25000 Megabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259CL",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "vt1.24xlarge",
"name": "vt1.24xlarge",
"ram": 196608,
},
"vt1.3xlarge": {
"bandwidth": 3125,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2375 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Media Accelerator Instances",
"instanceType": "vt1.3xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "24 GiB",
"networkPerformance": "3125 Megabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8259CL",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "12",
"vpcnetworkingsupport": "true",
},
"id": "vt1.3xlarge",
"name": "vt1.3xlarge",
"ram": 24576,
},
"vt1.6xlarge": {
"bandwidth": 6250,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Media Accelerator Instances",
"instanceType": "vt1.6xlarge",
"instancesku": "PY3K83PMVBEUYB32",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "48 GiB",
"networkPerformance": "6250 Megabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8259CL",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "vt1.6xlarge",
"name": "vt1.6xlarge",
"ram": 49152,
},
"x1.16xlarge": {
"bandwidth": None,
"disk": 1920,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "174.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1.16xlarge",
"instancesku": "XA7V7S39QEH3YU98",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "976 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "128",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "us-west-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1920 SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "x1.16xlarge",
"name": "x1.16xlarge",
"ram": 999424,
},
"x1.32xlarge": {
"bandwidth": None,
"disk": 3840,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "349",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1.32xlarge",
"instancesku": "XJ59DS49UK4EX6TK",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1952 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "256",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1920 SSD",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "x1.32xlarge",
"name": "x1.32xlarge",
"ram": 1998848,
},
"x1e.16xlarge": {
"bandwidth": 10,
"disk": 1920,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "179",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1952 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1920 SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "x1e.16xlarge",
"name": "x1e.16xlarge",
"ram": 1998848,
},
"x1e.2xlarge": {
"bandwidth": 10,
"disk": 240,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "23",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "244 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 240 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "x1e.2xlarge",
"name": "x1e.2xlarge",
"ram": 249856,
},
"x1e.32xlarge": {
"bandwidth": 25,
"disk": 3840,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "340",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.32xlarge",
"instancesku": "Q6G5G6C6EJEDN5AY",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "3904 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "ca-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1920 SSD",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "x1e.32xlarge",
"name": "x1e.32xlarge",
"ram": 3997696,
},
"x1e.4xlarge": {
"bandwidth": 10,
"disk": 480,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "47",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "488 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 480 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "x1e.4xlarge",
"name": "x1e.4xlarge",
"ram": 499712,
},
"x1e.8xlarge": {
"bandwidth": 10,
"disk": 960,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "91",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.8xlarge",
"instancesku": "5ZY9WGJKJ9DF8XVF",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "976 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 960 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "x1e.8xlarge",
"name": "x1e.8xlarge",
"ram": 999424,
},
"x1e.xlarge": {
"bandwidth": 10,
"disk": 120,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "12",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.xlarge",
"instancesku": "BZCNQCWU5XA2AGR8",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 120 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "x1e.xlarge",
"name": "x1e.xlarge",
"ram": 124928,
},
"x2gd.12xlarge": {
"bandwidth": 20,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14250 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.12xlarge",
"instancesku": "YHYZ8JNVTJ3DYNPQ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.12xlarge",
"name": "x2gd.12xlarge",
"ram": 786432,
},
"x2gd.16xlarge": {
"bandwidth": 25,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.16xlarge",
"instancesku": "N35TPJKFBWBTRUM3",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "1024 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.16xlarge",
"name": "x2gd.16xlarge",
"ram": 1048576,
},
"x2gd.2xlarge": {
"bandwidth": 10,
"disk": 474,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.2xlarge",
"instancesku": "QWQAU6M6EHE8FZV3",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 474 SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.2xlarge",
"name": "x2gd.2xlarge",
"ram": 131072,
},
"x2gd.4xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.4xlarge",
"instancesku": "KTMUE4YKBZGAERNT",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.4xlarge",
"name": "x2gd.4xlarge",
"ram": 262144,
},
"x2gd.8xlarge": {
"bandwidth": 12,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.8xlarge",
"name": "x2gd.8xlarge",
"ram": 524288,
},
"x2gd.large": {
"bandwidth": 10,
"disk": 118,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.large",
"instancesku": "Z6BB6KXMRVW9EJ69",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 118 SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.large",
"name": "x2gd.large",
"ram": 32768,
},
"x2gd.medium": {
"bandwidth": 10,
"disk": 59,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.medium",
"instancesku": "8MEF6Y3MQGMTD8RY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 59 SSD",
"vcpu": "1",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.medium",
"name": "x2gd.medium",
"ram": 16384,
},
"x2gd.xlarge": {
"bandwidth": 10,
"disk": 237,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2gd.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 237 SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "x2gd.xlarge",
"name": "x2gd.xlarge",
"ram": 65536,
},
"x2idn.16xlarge": {
"bandwidth": 50,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2idn.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1024 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "x2idn.16xlarge",
"name": "x2idn.16xlarge",
"ram": 1048576,
},
"x2idn.24xlarge": {
"bandwidth": 75,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "60 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2idn.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1536 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-central-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "x2idn.24xlarge",
"name": "x2idn.24xlarge",
"ram": 1572864,
},
"x2idn.32xlarge": {
"bandwidth": 100,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "80 Gbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2idn.32xlarge",
"instancesku": "8ZJ2BP49SS5QKPBU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "2048 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "x2idn.32xlarge",
"name": "x2idn.32xlarge",
"ram": 2097152,
},
"x2iedn.16xlarge": {
"bandwidth": 50,
"disk": 1900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "40000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "2048 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "64",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.16xlarge",
"name": "x2iedn.16xlarge",
"ram": 2097152,
},
"x2iedn.24xlarge": {
"bandwidth": 75,
"disk": 2850,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "60000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "3072 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1425 NVMe SSD",
"vcpu": "96",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.24xlarge",
"name": "x2iedn.24xlarge",
"ram": 3145728,
},
"x2iedn.2xlarge": {
"bandwidth": 25,
"disk": 237,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.2xlarge",
"instancesku": "63BDVCXPJJ8Q4CK7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 237 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.2xlarge",
"name": "x2iedn.2xlarge",
"ram": 262144,
},
"x2iedn.32xlarge": {
"bandwidth": 100,
"disk": 3800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "80000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.32xlarge",
"instancesku": "3EHWHRSQJBFQT47A",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "4096 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "128",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.32xlarge",
"name": "x2iedn.32xlarge",
"ram": 4194304,
},
"x2iedn.4xlarge": {
"bandwidth": 25,
"disk": 475,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.4xlarge",
"name": "x2iedn.4xlarge",
"ram": 524288,
},
"x2iedn.8xlarge": {
"bandwidth": 25,
"disk": 950,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1024 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.8xlarge",
"name": "x2iedn.8xlarge",
"ram": 1048576,
},
"x2iedn.xlarge": {
"bandwidth": 25,
"disk": 118,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "3.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 20000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iedn.xlarge",
"instancesku": "8JDVXAWQBQGH2DE9",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "128 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Scalable (Icelake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 118 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "x2iedn.xlarge",
"name": "x2iedn.xlarge",
"ram": 131072,
},
"x2iezn.12xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "19000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iezn.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1536 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "x2iezn.12xlarge",
"name": "x2iezn.12xlarge",
"ram": 1572864,
},
"x2iezn.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3170 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iezn.2xlarge",
"instancesku": "D9NTTVU6X5ZXW8XW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "256 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "x2iezn.2xlarge",
"name": "x2iezn.2xlarge",
"ram": 262144,
},
"x2iezn.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4750 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iezn.4xlarge",
"instancesku": "6NEBVRJ8CDWDW8RV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "512 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16",
"vpcnetworkingsupport": "true",
},
"id": "x2iezn.4xlarge",
"name": "x2iezn.4xlarge",
"ram": 524288,
},
"x2iezn.6xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iezn.6xlarge",
"instancesku": "8S5BF3CQBGR6BV8B",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "768 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "x2iezn.6xlarge",
"name": "x2iezn.6xlarge",
"ram": 786432,
},
"x2iezn.8xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x2iezn.8xlarge",
"instancesku": "5STCQJH5K5KVSW5M",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "1024 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8252",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32",
"vpcnetworkingsupport": "true",
},
"id": "x2iezn.8xlarge",
"name": "x2iezn.8xlarge",
"ram": 1048576,
},
"z1d.12xlarge": {
"bandwidth": 25,
"disk": 1800,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "235",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-southeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48",
"vpcnetworkingsupport": "true",
},
"id": "z1d.12xlarge",
"name": "z1d.12xlarge",
"ram": 393216,
},
"z1d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2333 Mbps",
"ecu": "45",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.2xlarge",
"instancesku": "TJN3TJCJX99J6MMW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8",
"vpcnetworkingsupport": "true",
},
"id": "z1d.2xlarge",
"name": "z1d.2xlarge",
"ram": 65536,
},
"z1d.3xlarge": {
"bandwidth": 10,
"disk": 450,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "64",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.3xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "96 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 450 NVMe SSD",
"vcpu": "12",
"vpcnetworkingsupport": "true",
},
"id": "z1d.3xlarge",
"name": "z1d.3xlarge",
"ram": 98304,
},
"z1d.6xlarge": {
"bandwidth": 10,
"disk": 900,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "116",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.6xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "eu-west-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 900 NVMe SSD",
"vcpu": "24",
"vpcnetworkingsupport": "true",
},
"id": "z1d.6xlarge",
"name": "z1d.6xlarge",
"ram": 196608,
},
"z1d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2333 Mbps",
"ecu": "12",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.large",
"instancesku": "TC5JJ4DDT49HWM6A",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "us-east-1",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2",
"vpcnetworkingsupport": "true",
},
"id": "z1d.large",
"name": "z1d.large",
"ram": 16384,
},
"z1d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"availabilityzone": "NA",
"classicnetworkingsupport": "false",
"clockSpeed": "4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2333 Mbps",
"ecu": "23",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.xlarge",
"instancesku": "85BM84CW7NC939FV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"marketoption": "OnDemand",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"regionCode": "ap-northeast-2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4",
"vpcnetworkingsupport": "true",
},
"id": "z1d.xlarge",
"name": "z1d.xlarge",
"ram": 32768,
},
}
|
{
"content_hash": "1c435af4f0c605691956462986d428b1",
"timestamp": "",
"source": "github",
"line_count": 16003,
"max_line_length": 111,
"avg_line_length": 37.751984002999436,
"alnum_prop": 0.49710086154813826,
"repo_name": "mistio/libcloud",
"id": "6b36b6ee0a236bf163a9d08a27a785b279365408",
"size": "605005",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/compute/constants/ec2_instance_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
}
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import threading
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
    """Return the installed psycopg2 version as a tuple of ints, e.g. (2, 7, 4).

    The version string may carry a trailing tag such as
    "2.7.4 (dt dec pq3 ext lo64)"; only the leading numeric part is kept,
    and non-numeric components (e.g. 'b1' in a beta) are dropped.
    """
    numeric, _, _ = psycopg2.__version__.partition(' ')
    return tuple(int(part) for part in numeric.split('.') if part.isdigit())
# Parsed (major, minor, micro) version of the installed psycopg2.
PSYCOPG2_VERSION = psycopg2_version()

# Fail fast at import time if the driver is too old for this backend.
if PSYCOPG2_VERSION < (2, 4, 5):
    raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import utc_tzinfo_factory # NOQA isort:skip
from .version import get_version # NOQA isort:skip
# Make Django's "safe" string types quote like ordinary strings when they
# are sent to the database as query parameters.
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
# Have psycopg2 return uuid columns as Python uuid.UUID objects.
psycopg2.extras.register_uuid()

# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
    (INETARRAY_OID,),
    'INETARRAY',
    psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for PostgreSQL, backed by psycopg2."""
    vendor = 'postgresql'
    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'serial',
        'BigAutoField': 'bigserial',
        'BinaryField': 'bytea',
        'BooleanField': 'boolean',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'timestamp with time zone',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'interval',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'inet',
        'GenericIPAddressField': 'inet',
        'NullBooleanField': 'boolean',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer',
        'PositiveSmallIntegerField': 'smallint',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'uuid',
    }
    data_type_check_constraints = {
        'PositiveIntegerField': '"%(column)s" >= 0',
        'PositiveSmallIntegerField': '"%(column)s" >= 0',
    }
    operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': 'LIKE %s',
        'icontains': 'LIKE UPPER(%s)',
        'regex': '~ %s',
        'iregex': '~* %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE %s',
        'endswith': 'LIKE %s',
        'istartswith': 'LIKE UPPER(%s)',
        'iendswith': 'LIKE UPPER(%s)',
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': "LIKE '%%' || {} || '%%'",
        'icontains': "LIKE '%%' || UPPER({}) || '%%'",
        'startswith': "LIKE {} || '%%'",
        'istartswith': "LIKE UPPER({}) || '%%'",
        'endswith': "LIKE '%%' || {}",
        'iendswith': "LIKE '%%' || UPPER({})",
    }

    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Counter used to build unique names for server-side cursors
        # (see chunked_cursor()).
        self._named_cursor_idx = 0

    def get_connection_params(self):
        """Build the kwargs dictionary passed to psycopg2.connect()."""
        settings_dict = self.settings_dict
        # None may be used to connect to the default 'postgres' db
        if settings_dict['NAME'] == '':
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        conn_params = {
            'database': settings_dict['NAME'] or 'postgres',
        }
        conn_params.update(settings_dict['OPTIONS'])
        # 'isolation_level' is consumed by get_new_connection(), not by
        # psycopg2.connect().
        conn_params.pop('isolation_level', None)
        if settings_dict['USER']:
            conn_params['user'] = settings_dict['USER']
        if settings_dict['PASSWORD']:
            conn_params['password'] = settings_dict['PASSWORD']
        if settings_dict['HOST']:
            conn_params['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            conn_params['port'] = settings_dict['PORT']
        return conn_params

    def get_new_connection(self, conn_params):
        connection = Database.connect(**conn_params)
        # self.isolation_level must be set:
        # - after connecting to the database in order to obtain the database's
        #   default when no value is explicitly specified in options.
        # - before calling _set_autocommit() because if autocommit is on, that
        #   will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
        options = self.settings_dict['OPTIONS']
        try:
            self.isolation_level = options['isolation_level']
        except KeyError:
            self.isolation_level = connection.isolation_level
        else:
            # Set the isolation level to the value from OPTIONS.
            if self.isolation_level != connection.isolation_level:
                connection.set_session(isolation_level=self.isolation_level)
        return connection

    def ensure_timezone(self):
        """Sync the connection's time zone with Django's; return True if changed."""
        self.ensure_connection()
        conn_timezone_name = self.connection.get_parameter_status('TimeZone')
        timezone_name = self.timezone_name
        if timezone_name and conn_timezone_name != timezone_name:
            with self.connection.cursor() as cursor:
                cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
            return True
        return False

    def init_connection_state(self):
        self.connection.set_client_encoding('UTF8')
        timezone_changed = self.ensure_timezone()
        if timezone_changed:
            # Commit after setting the time zone (see #17062)
            if not self.get_autocommit():
                self.connection.commit()

    def create_cursor(self, name=None):
        """Create a cursor; a named cursor becomes a server-side cursor."""
        if name:
            # In autocommit mode, the cursor will be used outside of a
            # transaction, hence use a holdable cursor.
            cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit)
        else:
            cursor = self.connection.cursor()
        cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
        return cursor

    def chunked_cursor(self):
        """Return a server-side cursor with a name unique to this thread."""
        self._named_cursor_idx += 1
        return self._cursor(
            name='_django_curs_%d_%d' % (
                # Avoid reusing name in other threads
                threading.current_thread().ident,
                self._named_cursor_idx,
            )
        )

    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit = autocommit

    def check_constraints(self, table_names=None):
        """
        Check constraints by setting them to immediate. Then, when we're done,
        return them to deferred.
        """
        self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
        self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

    def is_usable(self):
        """Return True if a trivial query succeeds on the current connection."""
        try:
            # Use a psycopg cursor directly, bypassing Django's utilities.
            self.connection.cursor().execute("SELECT 1")
        except Database.Error:
            return False
        else:
            return True

    @property
    def _nodb_connection(self):
        nodb_connection = super(DatabaseWrapper, self)._nodb_connection
        try:
            nodb_connection.ensure_connection()
        except (Database.DatabaseError, WrappedDatabaseError):
            warnings.warn(
                "Normally Django will use a connection to the 'postgres' database "
                "to avoid running initialization queries against the production "
                "database when it's not needed (for example, when running tests). "
                "Django was unable to create a connection to the 'postgres' database "
                "and will use the default database instead.",
                RuntimeWarning
            )
            settings_dict = self.settings_dict.copy()
            settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
            # BUGFIX: pass the modified copy. Previously the unmodified
            # self.settings_dict was passed here, so the NAME override above
            # had no effect and the fallback connection still targeted the
            # original (unreachable) database.
            nodb_connection = self.__class__(
                settings_dict,
                alias=self.alias,
                allow_thread_sharing=False)
        return nodb_connection

    @cached_property
    def psycopg2_version(self):
        return PSYCOPG2_VERSION

    @cached_property
    def pg_version(self):
        with self.temporary_connection():
            return get_version(self.connection)
|
{
"content_hash": "e0b24f296657d719ad37a3d2e1c3cd86",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 113,
"avg_line_length": 39.172661870503596,
"alnum_prop": 0.6157024793388429,
"repo_name": "mattseymour/django",
"id": "a3b293ee366ad1f73275f778e9a3348385d94335",
"size": "10890",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/backends/postgresql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182963"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11845544"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
This module does hyper parameters optimization -- find the best parameters for estimator using different optimization models.
"""
# TODO think of pareto-optimization
from __future__ import division, print_function, absolute_import
from itertools import islice
from collections import OrderedDict
import logging
from sklearn.base import clone
import numpy
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.utils.random import check_random_state
from six.moves import zip
from ..estimators.utils import check_inputs
__author__ = 'Alex Rogozhnikov, Tatiana Likhomanenko'
class AbstractParameterGenerator(object):
    """
    Abstract class for grid search algorithm.
    The aim of this class is to generate new points, where the function (estimator) will be computed.
    You can define your own algorithm of step location of parameters grid.

    Parameters:
    ----------
    :param OrderedDict param_grid: the grid with parameters to optimize on
    :param int n_evaluations: the number of evaluations to do
    :param random_state: random generator
    :type random_state: int or RandomState or None
    """
    def __init__(self, param_grid, n_evaluations=10, random_state=None):
        assert isinstance(param_grid, dict), 'the passed param_grid should be of OrderedDict class'
        self.param_grid = OrderedDict(param_grid)
        _check_param_grid(param_grid)
        # number of possible values along each parameter axis
        self.dimensions = [len(param_values) for param, param_values in self.param_grid.items()]
        size = numpy.prod(self.dimensions)
        assert size > 1, 'The space of parameters contains only %i points' % size
        # no point in evaluating more points than the grid contains
        self.n_evaluations = min(n_evaluations, size)
        # results on different parameters
        self.grid_scores_ = OrderedDict()
        # all the tasks that are being computed or already computed
        self.queued_tasks_ = set()
        self.random_state = check_random_state(random_state)
        self.evaluations_done = 0

    def _indices_to_parameters(self, state_indices):
        """
        Point in parameter space kept as sequence of indices, i.e.:
        max_depth: 1, 2, 4, 8
        learning_rate = 0.01, 0.1, 0.2
        Then max_depth=4, learning_rate=0.1 has internal representation as (2, 1)

        :param state_indices: sequence of integers, i.e. (1, 2)
        :return: OrderedDict, like {max_depth=4, learning_rate=0.1}
        """
        return OrderedDict([(name, values[i]) for i, (name, values) in zip(state_indices, self.param_grid.items())])

    def _generate_random_point(self, enqueue=True):
        """Sample an unvisited point uniformly; optionally mark it as queued."""
        while True:
            result = tuple([self.random_state.randint(0, size) for size in self.dimensions])
            if result not in self.queued_tasks_:
                if enqueue:
                    self.queued_tasks_.add(result)
                return result

    def generate_next_point(self):
        """Generating next random point in parameters space"""
        raise NotImplementedError('Should be overriden by descendant')

    def generate_batch_points(self, size):
        """
        Generate several points in parameter space at once (needed when using parallel computations)

        :param size: how many points we shall generate
        :return: sequence of tuples, each tuple representing it's own
        """
        # may be overriden in descendants
        state_indices = []
        for _ in range(size):
            state_indices.append(self.generate_next_point())
        return zip(*state_indices)

    def add_result(self, state_indices, value):
        """
        After the model was trained and evaluated for specific set of parameters,
        we use this function to store result

        :param state_indices: tuple, which represents the space
        :param value: quality at this point
        """
        self.grid_scores_[state_indices] = value

    @property
    def best_score_(self):
        """
        Property, return best score of optimization.

        BUGFIX: uses the builtin max. The previous
        ``numpy.max(self.grid_scores_.values())`` breaks on Python 3, where
        ``dict.values()`` is a view object: numpy wraps it in a 0-d object
        array and returns the view itself instead of the best score.
        """
        return max(self.grid_scores_.values())

    @property
    def best_params_(self):
        """
        Property, return point of parameters grid with the best score
        """
        return self._indices_to_parameters(max(self.grid_scores_.items(), key=lambda x: x[1])[0])

    def print_results(self, reorder=True):
        """
        Prints the results of training

        :param bool reorder: if reorder==True, best results go earlier,
            otherwise the results are printed in the order of computation
        """
        sequence = self.grid_scores_.items()
        if reorder:
            sequence = sorted(sequence, key=lambda x: -x[1])
        for state_indices, value in sequence:
            state_string = ", ".join([name_value[0] + '=' + str(name_value[1]) for name_value
                                      in self._indices_to_parameters(state_indices).items()])
            print("{0:.3f}: {1}".format(value, state_string))
class RandomParameterOptimizer(AbstractParameterGenerator):
    """
    Generates each new grid point uniformly at random among unvisited points.
    """
    def generate_next_point(self):
        """Return (indices, parameters) of a fresh random point of the grid."""
        total_points = numpy.prod(self.dimensions)
        if len(self.queued_tasks_) >= total_points:
            raise RuntimeError("The grid is exhausted, cannot generate more points")
        indices = self._generate_random_point()
        return indices, self._indices_to_parameters(indices)
class RegressionParameterOptimizer(AbstractParameterGenerator):
    """
    Generates the next grid point with the help of a regression model:
    a regressor is trained on the (parameters -> score) pairs evaluated so
    far, random candidate points are drawn, and the candidate with the best
    predicted score is chosen.

    Parameters:
    ----------
    :param OrderedDict param_grid: the grid with parameters to optimize on
    :param int n_evaluations: the number of evaluations to do
    :param random_state: random generator
    :type random_state: int or RandomState or None
    :param int start_evaluations: count of random points generated on start
        (before the regressor is first trained)
    :param int n_attempts: number of random candidate points scored by the
        regressor on each step
    :param regressor: regressor to choose appropriate next point with potential best score
        (estimated this score by regressor); If None then RandomForest algorithm will be used.
    """
    def __init__(self, param_grid, n_evaluations=10, random_state=None,
                 start_evaluations=3, n_attempts=5, regressor=None):
        AbstractParameterGenerator.__init__(self, param_grid=param_grid, n_evaluations=n_evaluations,
                                            random_state=random_state)
        if regressor is None:
            regressor = RandomForestRegressor(max_depth=3, n_estimators=10, max_features=0.7)
        self.regressor = regressor
        self.n_attempts = n_attempts
        self.start_evaluations = start_evaluations

    def generate_next_point(self):
        """Generate the next point in parameters space.

        :return: (state_indices, parameters) -- internal grid indices and the
            corresponding OrderedDict of parameter values.
        """
        if len(self.queued_tasks_) > numpy.prod(self.dimensions) + self.n_attempts:
            raise RuntimeError("The grid is exhausted, cannot generate more points")
        if len(self.queued_tasks_) < self.start_evaluations:
            # Not enough evaluated points to train the regressor yet --
            # fall back to uniform random sampling.
            new_state_indices = self._generate_random_point()
            return new_state_indices, self._indices_to_parameters(new_state_indices)
        # Training regressor on (grid indices -> observed score) pairs.
        X = numpy.array([list(x) for x in self.grid_scores_.keys()], dtype=int)
        y = list(self.grid_scores_.values())
        regressor = clone(self.regressor).fit(X, y)
        # generating candidates (not enqueued here, so duplicates between
        # attempts are possible but they never repeat finished tasks)
        candidates = numpy.array([list(self._generate_random_point(enqueue=False))
                                  for _ in range(self.n_attempts)], dtype=int)
        # winning candidate index (highest predicted score)
        index = regressor.predict(candidates).argmax()
        new_state_indices = tuple(candidates[index, :])
        # remember the task
        self.queued_tasks_.add(new_state_indices)
        return new_state_indices, self._indices_to_parameters(new_state_indices)
class AnnealingParameterOptimizer(AbstractParameterGenerator):
    def __init__(self, param_grid, n_evaluations=10, temperature=0.2, random_state=None):
        """
        Implementation of the annealing algorithm

        Parameters
        ----------
        :param param_grid: the grid with parameters to optimize on
        :param int n_evaluations: the number of evaluations
        :param temperature: float, how tolerant we are to worse results.
            If it is very small, will never step to point with worse predictions.

        Doesn't support parallel execution, so cannot be used in optimization on cluster.
        """
        AbstractParameterGenerator.__init__(self, param_grid=param_grid,
                                            n_evaluations=n_evaluations,
                                            random_state=random_state)
        self.temperature = temperature
        # grid indices of the current position (None until the first step)
        self.actual_state = None

    def generate_next_point(self):
        """Generate the next point: a random neighbour of the current state,
        possibly after a Metropolis-style jump to the last evaluated point."""
        if self.actual_state is None:
            # First call: start the walk from a random point.
            new_state = self._generate_random_point(enqueue=True)
            self.actual_state = new_state
            return new_state, self._indices_to_parameters(new_state)
        else:
            actual_score = self.grid_scores_[self.actual_state]
            # checking if needed to jump after previous evaluation
            last_state = list(self.grid_scores_.keys())[-1]
            last_score = self.grid_scores_[last_state]
            # probability of transition; scores are normalized by their
            # standard deviation (+1e-5 guards against division by zero)
            std = numpy.std(list(self.grid_scores_.values())) + 1e-5
            p = numpy.exp(1. / self.temperature * (last_score - actual_score) / std)
            if p > self.random_state.uniform(0, 1):
                self.actual_state = last_state
        for attempt in range(100):
            # trying to change only one of parameters
            axis = self.random_state.randint(0, len(self.dimensions))
            new_state_indices = list(self.actual_state)
            new_state_indices[axis] = self.random_state.randint(0, self.dimensions[axis])
            new_state_indices = tuple(new_state_indices)
            if new_state_indices not in self.queued_tasks_:
                break
        else:
            # for/else: all 100 neighbour attempts hit visited points --
            # fall back to a fully random unvisited point
            print('failed to generate the simple way')
            new_state_indices = self._generate_random_point(enqueue=False)
        self.queued_tasks_.add(new_state_indices)
        return new_state_indices, self._indices_to_parameters(new_state_indices)

    def generate_batch_points(self, size):
        # Annealing is inherently sequential: every step depends on the
        # result of the previous evaluation.
        raise RuntimeError("Annealing optimization doesn't support batch-based optimization (on cluster)")
class SubgridParameterOptimizer(AbstractParameterGenerator):
    """
    Uses Metropolis-like optimization.
    If the parameter grid is large, first performs optimization on subgrid.

    Parameters:
    ----------
    :param OrderedDict param_grid: the grid with parameters to optimize on
    :param int n_evaluations: the number of evaluations to do
    :param random_state: random generator
    :type random_state: int or RandomState or None
    :param int start_evaluations: count of random point generation on start
    :param int subgrid_size: if the size of mesh too large, first we will optimize
        on subgrid with not more then subgrid_size possible values for each parameter.
    """
    def __init__(self, param_grid, n_evaluations=10, random_state=None, start_evaluations=3,
                 subgrid_size=3):
        AbstractParameterGenerator.__init__(self, param_grid=param_grid, n_evaluations=n_evaluations,
                                            random_state=random_state)
        self.start_evaluations = start_evaluations
        self.subgrid_size = subgrid_size
        self.dimensions_sum = sum(self.dimensions)
        self.subgrid_parameter_generator = None
        if not numpy.all(numpy.array(self.dimensions) <= 2 * self.subgrid_size):
            # Grid is large: spend half the evaluation budget on a coarser
            # subgrid first (recursively), then continue on the full grid.
            # NOTE(review): random_state is not forwarded to the subgrid
            # optimizer -- confirm this is intended.
            logger = logging.getLogger(__name__)
            logger.info("Optimizing on subgrid")
            param_subgrid, self.subgrid_indices = _create_subgrid(self.param_grid, self.subgrid_size)
            self.subgrid_parameter_generator = \
                SubgridParameterOptimizer(param_subgrid, n_evaluations=self.n_evaluations // 2,
                                          subgrid_size=subgrid_size)

    def generate_next_point(self):
        """Generating next point in parameters space"""
        if len(self.queued_tasks_) >= numpy.prod(self.dimensions):
            raise RuntimeError("The grid is exhausted, cannot generate more points")
        # trying to generate from subgrid
        if self.subgrid_parameter_generator is not None:
            if len(self.queued_tasks_) < self.subgrid_parameter_generator.n_evaluations:
                indices, parameters = self.subgrid_parameter_generator.generate_next_point()
                # Track the equivalent full-grid point so it is not revisited.
                self.queued_tasks_.add(_translate_key_from_subgrid(self.subgrid_indices, indices))
                return ('subgrid', indices), parameters
        if len(self.grid_scores_) <= 4:
            # Too few results for the Metropolis step -- sample at random.
            indices = self._generate_random_point()
            self.queued_tasks_.add(indices)
            return indices, self._indices_to_parameters(indices)
        # Softmax-like probabilities favouring points with better scores.
        results = numpy.array(list(self.grid_scores_.values()), dtype=float)
        std = numpy.std(results) + 1e-5
        probabilities = numpy.exp(numpy.clip((results - numpy.mean(results)) * 3. / std, -5, 5))
        probabilities /= numpy.sum(probabilities)
        # Mutation probability decays as the evaluation budget is consumed.
        temperature_p = numpy.clip(1. - len(self.queued_tasks_) / self.n_evaluations, 0.05, 1)
        while True:
            # Pick a promising evaluated point and perturb a few axes by +-1.
            start = self.random_state.choice(len(probabilities), p=probabilities)
            start_indices = list(self.grid_scores_.keys())[start]
            new_state_indices = list(start_indices)
            for _ in range(self.dimensions_sum // 6 + 1):
                if self.random_state.uniform() < temperature_p:
                    axis = self.random_state.randint(len(self.dimensions))
                    new_state_indices[axis] += int(numpy.sign(self.random_state.uniform() - 0.5))
            # Reject points that fell off the grid or were already queued.
            if any(new_state_indices[axis] < 0 or new_state_indices[axis] >= self.dimensions[axis]
                   for axis in range(len(self.dimensions))):
                continue
            new_state_indices = tuple(new_state_indices)
            if new_state_indices in self.queued_tasks_:
                continue
            self.queued_tasks_.add(new_state_indices)
            return new_state_indices, self._indices_to_parameters(new_state_indices)

    def add_result(self, state_indices, value):
        # Subgrid results are stored twice: translated into the full grid
        # and forwarded to the nested subgrid optimizer.
        if state_indices[0] == 'subgrid':
            self.grid_scores_[_translate_key_from_subgrid(self.subgrid_indices, state_indices[1])] = value
            self.subgrid_parameter_generator.add_result(state_indices[1], value)
        else:
            self.grid_scores_[state_indices] = value
# region supplementary functions
def _check_param_grid(param_grid):
""" Checks parameters of grid """
for key, v in param_grid.items():
assert isinstance(key, str), 'Name of feature should be string'
if isinstance(v, numpy.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if not any([isinstance(v, k) for k in (list, tuple, numpy.ndarray)]):
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty list.")
def _create_subgrid(param_grid, n_values):
"""
Additional function to generate subgrid
:type param_grid: OrderedDict,
:type n_values: int, the maximal number of values along each axis
:rtype: (OrderedDict, OrderedDict), the subgrid and the indices of values that form subgrid
"""
subgrid = OrderedDict()
subgrid_indices = OrderedDict()
for key, values in param_grid.items():
if len(values) <= n_values:
subgrid[key] = list(values)
subgrid_indices[key] = range(len(values))
else:
# numpy.rint rounds to the nearest integer
axis_indices = numpy.rint(numpy.linspace(-0.5, len(values) - 0.5, 2 * n_values + 1)[1::2]).astype(int)
subgrid[key] = [values[index] for index in axis_indices]
subgrid_indices[key] = axis_indices
return subgrid, subgrid_indices
def _translate_key_from_subgrid(subgrid_indices, key):
"""
:type key: tuple, the indices (describing the point) in subgrid
:type subgrid_indices: OrderedDict, the indices of values taken to form subgrid
:rtype: tuple, the indices in grid
"""
return tuple([subgrid_indices[var_name][index] for var_name, index in zip(subgrid_indices, key)])
# endregion
class FoldingScorer(object):
    """
    Function-like object that estimates quality via k-fold cross-validation.

    Parameters:
    ----------
    :param function score_function: quality function, invoked as
        ``score_function(y_true, proba, sample_weight=...)``;
        if fold_checks > 1, the average over checks is returned
    :param int folds: 'k' used in k-folding while validating
    :param int fold_checks: not greater than folds, the number of folds
        actually trained and scored
    """
    def __init__(self, score_function, folds=3, fold_checks=1):
        self.score_function = score_function
        self.folds = folds
        self.fold_checks = fold_checks

    def __call__(self, base_estimator, params, X, y, sample_weight=None):
        """Train clones of *base_estimator* with *params* on each checked fold
        and return the average quality.

        :return float: quality
        """
        folder = StratifiedKFold(y=y, n_folds=self.folds)
        total_score = 0
        for train_indices, test_indices in islice(folder, self.fold_checks):
            estimator = clone(base_estimator)
            estimator.set_params(**params)
            X_train, y_train = X.iloc[train_indices, :], y[train_indices]
            X_test, y_test = X.iloc[test_indices, :], y[test_indices]
            if sample_weight is None:
                estimator.fit(X_train, y_train)
                total_score += self.score_function(y_test, estimator.predict_proba(X_test))
            else:
                w_train = sample_weight[train_indices]
                w_test = sample_weight[test_indices]
                estimator.fit(X_train, y_train, sample_weight=w_train)
                total_score += self.score_function(y_test, estimator.predict_proba(X_test),
                                                   sample_weight=w_test)
        return total_score / self.fold_checks
def apply_scorer(scorer, params, base_estimator, X, y, sample_weight):
    """
    Apply a scorer algorithm to a fresh clone of the estimator.

    :param scorer: algorithm to train estimator and get quality (see FoldingScorer for example)
    :param dict params: parameters for estimator
    :param base.BaseEstimator base_estimator: estimator
    :param X: pandas.DataFrame of shape [n_samples, n_features], data
    :param y: labels of events - array-like of shape [n_samples]
    :param sample_weight: weight of events,
        array-like of shape [n_samples] or None if all weights are equal
    :return: ('success', float) or ('fail', Exception); the float carries the quality.
    """
    try:
        fresh_estimator = clone(base_estimator)
        quality = scorer(params=params, base_estimator=fresh_estimator,
                         X=X, y=y, sample_weight=sample_weight)
    except Exception as exc:
        # Deliberately broad: any failure on a worker is reported to the
        # caller rather than raised, so a grid search can keep going.
        return 'fail', exc
    return 'success', quality
class GridOptimalSearchCV(object):
    """
    Optimal search over specified parameter values for an estimator. Metropolis-like algorithm is used

    Important members are fit, scorer.
    GridOptimalSearchCV implements a "fit" method and a "fit_best_estimator" method to train models.

    Parameters
    ----------
    :param BaseEstimator estimator: object of type that implements the "fit" and "fit_best_estimator" methods
        A new object of that type is cloned for each point.
    :param AbstractParameterGenerator params_generator: generator of grid search algorithm
    :param object scorer: which implement method __call__ with kwargs:
        "base_estimator", "params", "X", "y", "sample_weight"
    :param parallel_profile: name of profile
    :type parallel_profile: None or str

    Attributes
    ----------
    generator: return grid generator
    """
    def __init__(self, estimator, params_generator, scorer, parallel_profile=None):
        self.base_estimator = estimator
        self.params_generator = params_generator
        self.scorer = scorer
        self.parallel_profile = parallel_profile
        # number of grid points evaluated so far
        self.evaluations_done = 0

    def _log(self, msg, level=20):
        # level 20 corresponds to logging.INFO
        logger = logging.getLogger(__name__)
        logger.log(level, msg)

    @property
    def generator(self):
        """Property for params_generator"""
        return self.params_generator

    def fit_best_estimator(self, X, y, sample_weight=None):
        """
        Train estimator with the best parameters

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param y: labels of events - array-like of shape [n_samples]
        :param sample_weight: weight of events,
            array-like of shape [n_samples] or None if all weights are equal

        :return: the best estimator
        """
        best_estimator_ = clone(self.base_estimator)
        best_estimator_.set_params(**self.generator.best_params_)
        best_estimator_.fit(X, y, sample_weight=sample_weight)
        return best_estimator_

    def fit(self, X, y, sample_weight=None):
        """
        Run fit with all sets of parameters.

        :param X: array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and n_features is the number of features.
        :param y: array-like, shape = [n_samples] or [n_samples, n_output], optional
        :param sample_weight: array-like, shape = [n_samples], weight
        """
        X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=True)
        if self.parallel_profile is None:
            # Sequential mode: evaluate one grid point at a time, feeding
            # every result back to the generator.
            while self.evaluations_done < self.params_generator.n_evaluations:
                state_indices, state_dict = self.params_generator.generate_next_point()
                status, value = apply_scorer(self.scorer, state_dict, self.base_estimator, X, y, sample_weight)
                assert status == 'success', 'Error during grid search ' + str(value)
                self.params_generator.add_result(state_indices, value)
                self.evaluations_done += 1
                state_string = ", ".join([k + '=' + str(v) for k, v in state_dict.items()])
                self._log('{}: {}'.format(value, state_string))
        else:
            # Parallel mode: dispatch batches of points to an IPython cluster.
            # NOTE(review): IPython.parallel was split into the separate
            # ipyparallel package in IPython 4 -- confirm the environment.
            from IPython.parallel import Client
            direct_view = Client(profile=self.parallel_profile).direct_view()
            portion = len(direct_view)
            print("There are {0} cores in cluster, the portion is equal {1}".format(len(direct_view), portion))
            while self.evaluations_done < self.params_generator.n_evaluations:
                state_indices_array, state_dict_array = self.params_generator.generate_batch_points(size=portion)
                result = direct_view.map_sync(apply_scorer, [self.scorer] * portion, state_dict_array,
                                              [self.base_estimator] * portion,
                                              [X] * portion, [y] * portion, [sample_weight] * portion)
                assert len(result) == portion, "The length of result is very strange"
                for state_indices, state_dict, (status, score) in zip(state_indices_array, state_dict_array, result):
                    params = ", ".join([k + '=' + str(v) for k, v in state_dict.items()])
                    if status != 'success':
                        # A worker failure is logged (level 40 == ERROR) but
                        # does not abort the search.
                        message = 'Fail during training on the node \nException {exc}\n Parameters {params}'
                        self._log(message.format(exc=score, params=params), level=40)
                    else:
                        self.params_generator.add_result(state_indices, score)
                        self._log("{}: {}".format(score, params))
                self.evaluations_done += portion
                print("%i evaluations done" % self.evaluations_done)
        return self
|
{
"content_hash": "1866e96330f8e30c755ebd116bee0bcd",
"timestamp": "",
"source": "github",
"line_count": 558,
"max_line_length": 125,
"avg_line_length": 44.261648745519715,
"alnum_prop": 0.6326423192161309,
"repo_name": "nickcdryan/rep",
"id": "4c274142944a6f94303cb7a46fe3fb65a3fd78fc",
"size": "24698",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rep/metaml/gridsearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "335075"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
}
|
matplotlib bug
|
{
"content_hash": "7678c44f3d3b65f54f1d0875335e8011",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 14,
"avg_line_length": 8,
"alnum_prop": 0.8125,
"repo_name": "rsignell-usgs/notebook",
"id": "dc646367a7cded1224f09a4727b6243ac9433b83",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "People/Untitled1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13956"
},
{
"name": "Jupyter Notebook",
"bytes": "202646883"
},
{
"name": "Python",
"bytes": "1476735"
}
],
"symlink_target": ""
}
|
"""Train a small CNN with weight normalization on MNIST using treeano/canopy.

Flow: build the network graph, compile a validation and a training function,
then run 25 training iterations, validating after each one.
"""
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import treeano.sandbox.nodes.weight_normalization as wn
import canopy
import canopy.sandbox.datasets

fX = theano.config.floatX

# Each split is a mapping providing "x" (images) and "y" (labels) arrays.
train, valid, test = canopy.sandbox.datasets.mnist()

# conv -> pool -> conv -> pool -> dense -> dropout -> dense -> softmax,
# with both conv layers wrapped in weight normalization.  Shared
# hyperparameters (filters, sizes, inits, ...) are set on the outer node.
model = tn.HyperparameterNode(
    "model",
    tn.SequentialNode(
        "seq",
        [tn.InputNode("x", shape=(None, 1, 28, 28)),
         wn.WeightNormalizedDnnConv2DWithBiasNode("conv1"),
         tn.ReLUNode("relu1"),
         tn.DnnMaxPoolNode("mp1"),
         wn.WeightNormalizedDnnConv2DWithBiasNode("conv2"),
         tn.ReLUNode("relu2"),
         tn.DnnMaxPoolNode("mp2"),
         tn.DenseNode("fc1"),
         tn.ReLUNode("relu3"),
         tn.DropoutNode("do1"),
         tn.DenseNode("fc2", num_units=10),
         tn.SoftmaxNode("pred"),
         ]),
    num_filters=32,
    filter_size=(5, 5),
    pool_size=(2, 2),
    num_units=256,
    dropout_probability=0.5,
    inits=[treeano.inits.XavierNormalInit()],
)

# Wrap the model with Adam updates minimizing categorical cross-entropy
# between "pred" (referenced from the model) and the int32 targets "y".
with_updates = tn.HyperparameterNode(
    "with_updates",
    tn.AdamNode(
        "adam",
        {"subtree": model,
         "cost": tn.TotalCostNode("cost", {
             "pred": tn.ReferenceNode("pred_ref", reference="model"),
             "target": tn.InputNode("y", shape=(None,), dtype="int32")},
         )}),
    cost_function=treeano.utils.categorical_crossentropy_i32,
)
network = with_updates.network()
network.build()  # build eagerly to share weights

BATCH_SIZE = 500

# Validation function: dropout disabled, inputs processed in chunks of
# BATCH_SIZE, and the call is timed under the key "valid_time".
valid_fn = canopy.handled_fn(
    network,
    [canopy.handlers.time_call(key="valid_time"),
     canopy.handlers.override_hyperparameters(dropout_probability=0),
     canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
                                     variables=["x", "y"])],
    {"x": "x", "y": "y"},
    {"cost": "cost", "pred": "pred"})


def validate(in_dict, results_dict):
    # Evaluate on the validation split and record cost/timing/accuracy into
    # results_dict; invoked by train_fn's call_after_every(1, ...) handler.
    valid_out = valid_fn(valid)
    probabilities = valid_out["pred"]
    predicted_classes = np.argmax(probabilities, axis=1)
    results_dict["valid_cost"] = valid_out["cost"]
    results_dict["valid_time"] = valid_out["valid_time"]
    results_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
        valid["y"], predicted_classes)


# Training function: applies the Adam updates (include_updates=True) and runs
# validate() after every call; both total and pure-training time are recorded.
train_fn = canopy.handled_fn(
    network,
    [canopy.handlers.time_call(key="total_time"),
     canopy.handlers.call_after_every(1, validate),
     canopy.handlers.time_call(key="train_time"),
     canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
                                     variables=["x", "y"])],
    {"x": "x", "y": "y"},
    {"train_cost": "cost"},
    include_updates=True)

# ################################# training #################################

print("Starting training...")
canopy.evaluate_until(fn=train_fn,
                      gen=itertools.repeat(train),
                      max_iters=25)
|
{
"content_hash": "854e1bfccf32436576c775a01354a6d8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 31.275510204081634,
"alnum_prop": 0.6110929853181076,
"repo_name": "diogo149/treeano",
"id": "c01c08a98d822b20eeaaef21843257fcebdec789",
"size": "3065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/weight_normalization/mnist_cnn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16042"
},
{
"name": "Python",
"bytes": "864524"
}
],
"symlink_target": ""
}
|
""" A little example script showing a Capture-Game tournament between
- a random player
- a kill-on-sight player
- a small-network-player with random weights
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.twoplayergames import CaptureGame
from pybrain.rl.environments.twoplayergames.capturegameplayers import RandomCapturePlayer, KillingPlayer, ModuleDecidingPlayer
from pybrain.rl.environments.twoplayergames.capturegameplayers.clientwrapper import ClientCapturePlayer
from pybrain.rl.experiments.tournament import Tournament
from pybrain.tools.shortcuts import buildNetwork
from pybrain import SigmoidLayer
game = CaptureGame(5)
randAgent = RandomCapturePlayer(game, name = 'rand')
killAgent = KillingPlayer(game, name = 'kill')
# the network's outputs are probabilities of choosing the action, thus a sigmoid output layer
net = buildNetwork(game.outdim, game.indim, outclass = SigmoidLayer)
netAgent = ModuleDecidingPlayer(net, game, name = 'net')
# same network, but greedy decisions:
netAgentGreedy = ModuleDecidingPlayer(net, game, name = 'greedy', greedySelection = True)
agents = [randAgent, killAgent, netAgent, netAgentGreedy]
try:
javaAgent = ClientCapturePlayer(game, name = 'java')
agents.append(javaAgent)
except:
print 'No Java server available.'
print
print 'Starting tournament...'
tourn = Tournament(game, agents)
tourn.organize(50)
print tourn
# try a different network, and play again:
net.randomize()
tourn.reset()
tourn.organize(50)
print tourn
|
{
"content_hash": "75e3122f51f595db1f32495a7e6cbfb1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 126,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7868421052631579,
"repo_name": "hassaanm/stock-trading",
"id": "1145977118928319cc1f14cd594a3f64cc7ee6eb",
"size": "1542",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pybrain-pybrain-87c7ac3/examples/rl/environments/capturegame/minitournament.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "822"
},
{
"name": "C++",
"bytes": "18096"
},
{
"name": "JavaScript",
"bytes": "19227"
},
{
"name": "Perl",
"bytes": "1924"
},
{
"name": "Python",
"bytes": "2461668"
},
{
"name": "Shell",
"bytes": "3384"
}
],
"symlink_target": ""
}
|
"""
This script extracts surrounding buildings of the zone geometry from Open street maps
"""
import math
import os
import numpy as np
import osmnx.footprints
import pandas as pd
from geopandas import GeoDataFrame as gdf
from geopandas.tools import sjoin as spatial_join
import cea.config
import cea.inputlocator
from cea.datamanagement.zone_helper import parse_building_floors
from cea.demand import constants
from cea.utilities.standardize_coordinates import get_projected_coordinate_system, get_geographic_coordinate_system
__author__ = "Jimeno Fonseca"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calc_surrounding_area(zone_gdf, buffer_m):
    """
    Build the surroundings search area by buffering the zone footprint.

    :param geopandas.GeoDataFrame zone_gdf: zone building geometries
    :param float buffer_m: buffer distance, in the units of the zone's CRS
    :return: single-polygon GeoDataFrame covering the buffered zone
    """
    # Buffer every building, then dissolve the buffers into one polygon.
    buffered_union = zone_gdf.geometry.buffer(buffer_m).unary_union
    return gdf(geometry=[buffered_union], crs=zone_gdf.crs)
def get_zone_and_surr_in_projected_crs(locator):
    """
    Load the zone and surroundings shapefiles, re-projecting both to the
    projected CRS centred on the zone if their CRSs disagree.

    Side effect: when a re-projection is needed, the projected GeoDataFrames
    are written back to their shapefiles.

    :param locator: InputLocator providing the shapefile paths
    :return: (zone_gdf, surroundings_gdf) tuple in the projected CRS
    """
    # generate GeoDataFrames from files
    zone_gdf = gdf.from_file(locator.get_zone_geometry())
    surroundings_gdf = gdf.from_file(locator.get_surroundings_geometry())
    # get longitude and latitude of zone centroid
    zone_gdf_in_geographic_crs = zone_gdf.to_crs(get_geographic_coordinate_system())
    lon = zone_gdf_in_geographic_crs.geometry[0].centroid.coords.xy[0][0]
    lat = zone_gdf_in_geographic_crs.geometry[0].centroid.coords.xy[1][0]
    # check if the coordinate reference systems (crs) of the zone and its surroundings match
    if zone_gdf.crs != surroundings_gdf.crs or zone_gdf.crs != get_projected_coordinate_system(lat=lat, lon=lon):
        # if they don't match project the zone and its surroundings to the global crs...
        zone_gdf = zone_gdf.to_crs(get_projected_coordinate_system(lat=lat, lon=lon))
        surroundings_gdf = surroundings_gdf.to_crs(get_projected_coordinate_system(lat=lat, lon=lon))
        # and save the projected GDFs to their corresponding shapefiles
        zone_gdf.to_file(locator.get_zone_geometry())
        surroundings_gdf.to_file(locator.get_surroundings_geometry())
    return zone_gdf, surroundings_gdf
def clean_attributes(shapefile, buildings_height, buildings_floors, key):
    """
    Derive height/floor attributes for the surrounding buildings and keep only
    the columns CEA needs.

    When neither a height nor a floor count is given, floors are taken from the
    OSM tags 'building:levels' + 'roof:levels', with the area-wide median used
    for buildings missing that data; height is floors * H_F.

    :param geopandas.GeoDataFrame shapefile: raw OSM building footprints
    :param buildings_height: fixed height above ground [m], or None to derive
    :param buildings_floors: fixed floors above ground, or None to derive
    :param str key: prefix used when generating the "Name" column
    :return: GeoDataFrame with Name, height_ag, floors_ag, description,
        category, geometry and REFERENCE columns
    """
    # local variables
    no_buildings = shapefile.shape[0]
    list_of_columns = shapefile.columns
    if buildings_height is None and buildings_floors is None:
        print('Warning! you have not indicated a height or number of floors above ground for the buildings, '
              'we are reverting to data stored in Open Street Maps (It might not be accurate at all),'
              'if we do not find data in OSM for a particular building, we get the median in the surroundings, '
              'if we do not get any data we assume 4 floors per building')
        # Check which attributes the OSM has, Sometimes it does not have any and indicate the data source
        if 'building:levels' not in list_of_columns:
            shapefile['building:levels'] = [3] * no_buildings
            shapefile['REFERENCE'] = "CEA - assumption"
        elif pd.isnull(shapefile['building:levels']).all():
            shapefile['building:levels'] = [3] * no_buildings
            shapefile['REFERENCE'] = "CEA - assumption"
        else:
            shapefile['REFERENCE'] = ["OSM - median" if x is np.nan else "OSM - as it is" for x in
                                      shapefile['building:levels']]
        if 'roof:levels' not in list_of_columns:
            shapefile['roof:levels'] = 0

        # get the median from the area:
        data_osm_floors1 = shapefile['building:levels'].fillna(0)
        data_osm_floors2 = shapefile['roof:levels'].fillna(0)
        # total floors = building levels + roof levels, parsed leniently
        data_floors_sum = [x + y for x, y in
                           zip([parse_building_floors(x) for x in data_osm_floors1],
                               [parse_building_floors(x) for x in data_osm_floors2])]
        # less than one floor is treated as "unknown" (NaN)
        data_floors_sum_with_nan = [np.nan if x < 1.0 else x for x in data_floors_sum]
        data_osm_floors_joined = int(
            math.ceil(np.nanmedian(data_floors_sum_with_nan)))  # median so we get close to the worse case
        # identity check against np.nan is safe here: the NaNs in the list are
        # the very object inserted in the comprehension above
        shapefile["floors_ag"] = [int(x) if x is not np.nan else data_osm_floors_joined for x in
                                  data_floors_sum_with_nan]
        shapefile["height_ag"] = shapefile["floors_ag"] * constants.H_F
    else:
        shapefile['REFERENCE'] = "User - assumption"
        if buildings_height is None and buildings_floors is not None:
            shapefile["floors_ag"] = [buildings_floors] * no_buildings
            shapefile["height_ag"] = shapefile["floors_ag"] * constants.H_F
        elif buildings_height is not None and buildings_floors is None:
            shapefile["height_ag"] = [buildings_height] * no_buildings
            shapefile["floors_ag"] = [int(math.floor(x)) for x in shapefile["height_ag"] / constants.H_F]
        else:  # both are not none
            shapefile["height_ag"] = [buildings_height] * no_buildings
            shapefile["floors_ag"] = [buildings_floors] * no_buildings

    # add description, preferring explicit OSM description tags when present
    if "description" in list_of_columns:
        shapefile["description"] = shapefile['description']
    elif 'addr:housename' in list_of_columns:
        shapefile["description"] = shapefile['addr:housename']
    elif 'amenity' in list_of_columns:
        shapefile["description"] = shapefile['amenity']
    else:
        shapefile["description"] = [np.nan] * no_buildings

    shapefile["category"] = shapefile['building']
    shapefile["Name"] = [key + str(x + 1000) for x in
                         range(no_buildings)]  # start in a big number to avoid potential confusion\
    result = shapefile[
        ["Name", "height_ag", "floors_ag", "description", "category", "geometry", "REFERENCE"]]
    result.reset_index(inplace=True, drop=True)
    return result
def erase_no_surrounding_areas(all_surroundings, zone, area_with_buffer):
    """
    Drop candidate buildings that lie outside the buffer area or belong to
    the zone itself.

    :param geopandas.GeoDataFrame all_surroundings: candidate building footprints
    :param geopandas.GeoDataFrame zone: zone buildings
    :param geopandas.GeoDataFrame area_with_buffer: buffered search polygon
    :return: GeoDataFrame with the true surrounding buildings
    """
    # Keep only geometries intersecting the buffered search polygon.
    search_polygon = area_with_buffer.to_crs(zone.crs).geometry.values[0]
    candidates = all_surroundings[all_surroundings.geometry.intersects(search_polygon)]
    # A candidate belongs to the zone when its representative point spatially
    # joins the dissolved zone footprint; keep the ones that do not join.
    zone_footprint = gdf(geometry=[zone.geometry.unary_union], crs=zone.crs)
    anchor_points = gdf(geometry=candidates.geometry.representative_point(), crs=all_surroundings.crs)
    outside_zone = spatial_join(anchor_points, zone_footprint, how='left')['index_right'].isna()
    return candidates[outside_zone].copy()
def geometry_extractor_osm(locator, config):
    """
    Fetch the buildings surrounding the zone from OpenStreetMap and write them
    to the scenario's surroundings shapefile.

    :param locator: InputLocator for the scenario's files
    :param config: configuration; reads buffer, height_ag and floors_ag from
        the surroundings_helper section
    """
    # local variables:
    buffer_m = config.surroundings_helper.buffer
    buildings_height = config.surroundings_helper.height_ag
    buildings_floors = config.surroundings_helper.floors_ag
    shapefile_out_path = locator.get_surroundings_geometry()
    zone = gdf.from_file(locator.get_zone_geometry())

    # transform zone file to geographic coordinates to locate its centroid...
    zone = zone.to_crs(get_geographic_coordinate_system())
    lon = zone.geometry[0].centroid.coords.xy[0][0]
    lat = zone.geometry[0].centroid.coords.xy[1][0]
    # ...then to the projected CRS centred on that point
    zone = zone.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # get a polygon of the surrounding area, and one polygon representative of the zone area
    print("Calculating surrounding area")
    area_with_buffer = calc_surrounding_area(zone, buffer_m)

    # get footprints of all the surroundings (OSM query needs geographic coords)
    print("Getting building footprints")
    area_with_buffer_polygon = area_with_buffer.to_crs(get_geographic_coordinate_system()).geometry.values[0]
    all_surroundings = osmnx.footprints.footprints_from_polygon(polygon=area_with_buffer_polygon)
    all_surroundings = all_surroundings.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # erase overlapping area
    print("Removing unwanted buildings")
    surroundings = erase_no_surrounding_areas(all_surroundings, zone, area_with_buffer)

    # NOTE(review): assert is stripped when Python runs with -O; consider an
    # explicit raise here.
    assert surroundings.shape[0] > 0, 'No buildings were found within range based on buffer parameter.'

    # clean attributes of height, name and number of floors
    result = clean_attributes(surroundings, buildings_height, buildings_floors, key="CEA")
    result = result.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # save to shapefile
    result.to_file(shapefile_out_path)
def main(config):
    """
    Create the surroundings.shp file for the given scenario.

    :param config: configuration naming the scenario to process
    :type config: cea.config.Configuration
    :raises ValueError: if the scenario directory does not exist
    :return: None
    """
    # The original used "assert" for input validation; assert statements are
    # stripped when Python runs with -O, so validate explicitly instead.
    if not os.path.exists(config.scenario):
        raise ValueError('Scenario not found: %s' % config.scenario)
    locator = cea.inputlocator.InputLocator(config.scenario)

    geometry_extractor_osm(locator, config)


if __name__ == '__main__':
    main(cea.config.Configuration())
|
{
"content_hash": "e0164d6edaf0985495f1fcf48bb61e3c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 115,
"avg_line_length": 46.36407766990291,
"alnum_prop": 0.6866296722856245,
"repo_name": "architecture-building-systems/CityEnergyAnalyst",
"id": "bbc134c1069b7a2d4253a47a8785c44d5af6f1ff",
"size": "9551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cea/datamanagement/surroundings_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5622"
},
{
"name": "Dockerfile",
"bytes": "2277"
},
{
"name": "HTML",
"bytes": "47667"
},
{
"name": "Jupyter Notebook",
"bytes": "409952"
},
{
"name": "NSIS",
"bytes": "9782"
},
{
"name": "Python",
"bytes": "2681047"
},
{
"name": "Shell",
"bytes": "8768"
}
],
"symlink_target": ""
}
|
print "|--------------------------------------------|"
print "| Starting Tutorial 3 |"
print "|--------------------------------------------|"
print 'media path = ' + scene.getMediaPath()
# Add asset paths
assetManager = scene.getAssetManager()
assetManager.addAssetPath('motion', 'ChrBrad')
assetManager.addAssetPath('mesh', 'mesh')
assetManager.addAssetPath('script', 'scripts')
# Load assets based on asset paths
assetManager.loadAssets()
# run a Python script file
scene.run('zebra2-map.py')
motionNames = assetManager.getMotionNames()
skelNames = assetManager.getSkeletonNames()
for i in range(0,len(motionNames)):
print 'motion ' + str(i) + ' = ' + motionNames[i]
for i in range(0,len(skelNames)):
print 'skeleton ' + str(i) + ' = ' + skelNames[i]
|
{
"content_hash": "9150f67910e0852981612c62fe5493c0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 54,
"avg_line_length": 33.583333333333336,
"alnum_prop": 0.5955334987593052,
"repo_name": "gsi-upm/SmartSim",
"id": "1fa44af9b9ee1d39f5a16da526cc7073346f1070",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartbody/data/examples/Tutorials/3_LoadAssets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11708"
},
{
"name": "C",
"bytes": "941473"
},
{
"name": "C#",
"bytes": "733730"
},
{
"name": "C++",
"bytes": "16389947"
},
{
"name": "CMake",
"bytes": "114424"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "45459"
},
{
"name": "Groff",
"bytes": "2619"
},
{
"name": "HTML",
"bytes": "1128698"
},
{
"name": "Inno Setup",
"bytes": "8592"
},
{
"name": "Java",
"bytes": "371478"
},
{
"name": "M4",
"bytes": "16806"
},
{
"name": "Makefile",
"bytes": "240549"
},
{
"name": "Objective-C",
"bytes": "4511"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13551"
},
{
"name": "Protocol Buffer",
"bytes": "3178"
},
{
"name": "Python",
"bytes": "989019"
},
{
"name": "Rust",
"bytes": "105"
},
{
"name": "Shell",
"bytes": "248995"
},
{
"name": "Smalltalk",
"bytes": "1540"
},
{
"name": "Smarty",
"bytes": "179"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
class ParameterValueWidget( GafferUI.Widget ) :

	"""Associates a PlugValueWidget with the ParameterHandler whose plug it
	edits, and provides a factory (create/registerType) for choosing the
	right widget for a given parameter."""

	def __init__( self, plugValueWidget, parameterHandler, **kw ) :

		assert( isinstance( plugValueWidget, GafferUI.PlugValueWidget ) )

		GafferUI.Widget.__init__( self, plugValueWidget, **kw )

		self.__plugValueWidget = plugValueWidget
		self.__parameterHandler = parameterHandler

	def plug( self ) :

		# The plug the handler created for the parameter.
		return self.__parameterHandler.plug()

	def parameter( self ) :

		# The parameter being edited.
		return self.__parameterHandler.parameter()

	def parameterHandler( self ) :

		return self.__parameterHandler

	def plugValueWidget( self ) :

		# The wrapped widget that does the actual editing.
		return self.__plugValueWidget

	__popupMenuSignal = Gaffer.Signal2()

	## This signal is emitted whenever a popup menu for a parameter is about
	# to be shown. This provides an opportunity to customise the menu from
	# external code. The signature for slots is ( menuDefinition, parameterValueWidget ),
	# and slots should just modify the menu definition in place.
	@classmethod
	def popupMenuSignal( cls ) :

		return cls.__popupMenuSignal

	@classmethod
	def create( cls, parameterHandler ) :

		# Factory method choosing the most appropriate widget for the
		# parameter.  Presets-only parameters always get the presets widget.
		parameter = parameterHandler.parameter()

		if parameter.presetsOnly :
			return GafferUI.PresetsOnlyParameterValueWidget( parameterHandler )

		# An optional hint in the parameter's user data can select a
		# specialised creator.
		uiTypeHint = None
		with IECore.IgnoredExceptions( KeyError ) :
			uiTypeHint = parameter.userData()["UI"]["typeHint"].value

		parameterHierarchy = [ parameter.typeId() ] + IECore.RunTimeTyped.baseTypeIds( parameter.typeId() )

		# Look for a creator registered for ( type, hint ) anywhere in the
		# type hierarchy first, then for the type alone.
		if uiTypeHint is not None :
			for typeId in parameterHierarchy :
				creator = cls.__typesToCreators.get( ( typeId, uiTypeHint ), None )
				if creator is not None :
					return creator( parameterHandler )

		for typeId in parameterHierarchy :
			creator = cls.__typesToCreators.get( ( typeId, None ), None )
			if creator is not None :
				return creator( parameterHandler )

		# No registered creator - fall back to wrapping a generic
		# PlugValueWidget for the parameter's plug.
		w = GafferUI.PlugValueWidget.create( parameterHandler.plug() )
		if w is not None :
			return ParameterValueWidget( w, parameterHandler )

		return None

	@classmethod
	def registerType( cls, parameterTypeId, creator, uiTypeHint = None ) :

		# Register a creator callable for a parameter type, optionally
		# restricted to a specific UI type hint.
		cls.__typesToCreators[(parameterTypeId, uiTypeHint)] = creator

	__typesToCreators = {}
# parameter popup menus
##########################################################################

# We piggy-back onto the existing PlugValueWidget popup menu signal to
# emit our own popup menu signal where appropriate.
def __plugPopupMenu( menuDefinition, plugValueWidget ) :

	# Only plugs belonging to nodes that hold a parameter handler are of
	# interest here.
	if not hasattr( plugValueWidget.getPlug().node(), "parameterHandler" ) :
		return

	# Emit our signal only when the widget actually lives inside a
	# ParameterValueWidget.
	ancestorWidget = plugValueWidget.ancestor( GafferUI.ParameterValueWidget )
	if ancestorWidget is not None :
		ParameterValueWidget.popupMenuSignal()( menuDefinition, ancestorWidget )

__plugPopupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu )
# add menu items for presets

def __parameterPopupMenu( menuDefinition, parameterValueWidget ) :

	parameterHandler = parameterValueWidget.parameterHandler()

	if isinstance( parameterHandler.parameter(), IECore.CompoundVectorParameter ) :
		# the default value and overall presets don't currently work very well
		# for CompoundVectorParameters.
		return

	# replace plug default item with parameter default item. they
	# differ in that the parameter default applies to all children
	# of things like V3iParameters rather than just a single one.
	menuDefinition.remove( "/Default", raiseIfMissing=False )
	menuDefinition.append(
		"/Default",
		{
			"command" : IECore.curry( __setValue, parameterHandler, parameterHandler.parameter().defaultValue ),
			"active" : parameterValueWidget.plugValueWidget()._editable(),
		}
	)

	# add menu items for presets
	if len( parameterHandler.parameter().presetNames() ) :
		menuDefinition.append( "/PresetDivider", { "divider" : True } )

	# one entry per preset; choosing it assigns that preset by name
	for name in parameterHandler.parameter().presetNames() :
		menuDefinition.append( "/" + name, { "command" : IECore.curry( __setValue, parameterHandler, name ) } )

__parameterPopupMenuConnection = ParameterValueWidget.popupMenuSignal().connect( __parameterPopupMenu )
def __setValue( parameterHandler, value ) :

	# Set the parameter itself first, then transfer the value onto the plug
	# inside an undo context so the plug change is undoable.
	parameterHandler.parameter().setValue( value )
	with Gaffer.UndoContext( parameterHandler.plug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
		parameterHandler.setPlugValue()
|
{
"content_hash": "5713022cf1fbbb2262af20ec79dcb21c",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 105,
"avg_line_length": 32.16083916083916,
"alnum_prop": 0.7323331158947597,
"repo_name": "davidsminor/gaffer",
"id": "412431acb8a76ea00188a9754ae94b001266b834",
"size": "6478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/ParameterValueWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
}
|
"""Outputs the current date and time information as a key-value file
appropriate for use with template_replace.py.
"""
import datetime
import logging
import optparse
import os
import re
import sys
# Use the GIT helper functions from 'lastchange.py'.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import lastchange
_LOGGER = logging.getLogger(os.path.basename(__file__))
def _ParseArguments():
  """Parses the command line and returns the options object.

  --verbose selects INFO-level logging (errors only otherwise), and a
  relative --output path is normalized to an absolute one.
  """
  option_parser = optparse.OptionParser()
  option_parser.add_option('-o', '--output', dest='output', default=None,
                           help='The file to write to. If not specified outputs to '
                                'stdout.')
  option_parser.add_option("-s", "--source-dir", metavar="DIR",
                           help="use repository in the given directory")
  option_parser.add_option('-v', '--verbose', dest='verbose',
                           action='store_true', default=False,
                           help='Enable verbose logging.')
  opts, _unused_args = option_parser.parse_args()

  log_level = logging.INFO if opts.verbose else logging.ERROR
  logging.basicConfig(level=log_level)

  if opts.output:
    opts.output = os.path.abspath(opts.output)

  return opts
def main():
  """Writes DATE/TIME key-value pairs to --output (or stdout).

  Official builds report the timestamp of the last git commit; developer
  builds report a fixed fake time (2000/01/01) so they are deterministic.
  """
  opts = _ParseArguments()

  t = None
  # NOTE: the original tested "not IsOfficialBuild()", which contradicted
  # both branches' log messages and the evident intent; official builds get
  # the real commit time, developer builds the fixed fake one.
  if lastchange.IsOfficialBuild():
    _LOGGER.info('Official build, reporting git time.')
    if opts.source_dir:
      src_dir = opts.source_dir
    else:
      src_dir = os.path.dirname(os.path.abspath(__file__))

    # Get the commit timestamp in seconds since UTC epoch.
    git_time = lastchange.RunGitCommand(src_dir,
        ['log', '-1', '--date=raw', '--pretty=format:%cd']).strip()
    # Raw git date is "<seconds> <+/-HHMM>"; raw string so \d is literal regex.
    m = re.match(r'(^\d+) ([+-]\d{4})$', git_time)
    if not m:
      # The original raised the non-existent name "RuntimeException", which
      # itself would have failed with a NameError.
      raise RuntimeError('Unexpected "git time" output: %s' % git_time)
    seconds_utc = int(m.group(1))
    t = datetime.datetime.utcfromtimestamp(seconds_utc)
  else:
    _LOGGER.info('Developer build, reporting fake time.')
    now = datetime.datetime.utcnow()
    t = datetime.datetime(2000, 1, 1, 0, 0, 0, 0, now.tzinfo)

  contents = """# This file was automatically generated by timestamp.py.
DATE=%s
TIME=%s
""" % (t.strftime('%Y/%m/%d'), t.strftime('%H:%M:%S UTC'))

  if opts.output:
    lastchange.WriteIfChanged(opts.output, contents)
  else:
    sys.stdout.write(contents)


if __name__ == '__main__':
  sys.exit(main())
|
{
"content_hash": "a318f8584ff02829f4aa515ae4c3f594",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 30.38157894736842,
"alnum_prop": 0.6396708531831962,
"repo_name": "sebmarchand/syzygy",
"id": "7b75a7ca0307e3ae00a1a757b83a70c04b4ed759",
"size": "2914",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "syzygy/build/timestamp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "13748"
},
{
"name": "C",
"bytes": "8422"
},
{
"name": "C++",
"bytes": "7598735"
},
{
"name": "CSS",
"bytes": "1333"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Protocol Buffer",
"bytes": "6472"
},
{
"name": "Python",
"bytes": "841963"
},
{
"name": "Shell",
"bytes": "19040"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from .views import decorated, roundtrip, undecorated
app_name = "tests"
urlpatterns = [
path("admin/", admin.site.urls),
path("decorated/$", decorated, name="decorated"),
path("roundtrip/", roundtrip, name="roundtrip"),
path("undecorated/", undecorated, name="undecorated"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{
"content_hash": "bdd54d8db0d974e984ff07b2933b8c02",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.738,
"repo_name": "yunojuno/django-expiring-links",
"id": "1bb7daa838672a155ae95cd156adc8852fb58b15",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57040"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.utils.encoding import force_text
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.tests.factories import create_content_item
from fluent_contents.tests.utils import render_content_items
class MarkupPluginTests(TestCase):
    """
    Testing markup plugin logic
    """

    def test_markup(self):
        # One model class is generated per markup language; use the
        # reStructuredText one.  The text is deliberately unindented because
        # leading whitespace is significant to the RST parser.
        RstItem = LANGUAGE_MODEL_CLASSES["restructuredtext"]
        item = create_content_item(
            RstItem,
            text="""
RST
----
* Markup!""",
        )
        # The rendered output wraps the converted markup in a div.markup.
        expected = """<div class="markup"><h1 class="title">RST</h1><ul class="simple"><li>Markup!</li></ul></div>"""
        self.assertHTMLEqual(force_text(render_content_items([item])), expected)
{
"content_hash": "921399cb08fb635e83f0a21759e95d89",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 117,
"avg_line_length": 28.296296296296298,
"alnum_prop": 0.675392670157068,
"repo_name": "edoburu/django-fluent-contents",
"id": "e69e287c7cc92c3302644e21b32d98a933981f61",
"size": "764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fluent_contents/plugins/markup/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13178"
},
{
"name": "HTML",
"bytes": "35807"
},
{
"name": "JavaScript",
"bytes": "80446"
},
{
"name": "Python",
"bytes": "494720"
}
],
"symlink_target": ""
}
|
import os
def _(path):
    """Return *path* with every '/' and backslash replaced by ``os.sep``."""
    # One-pass translation of both separator styles to the native one.
    separators = str.maketrans({'/': os.sep, '\\': os.sep})
    return path.translate(separators)
|
{
"content_hash": "24e2aaa67d01fd55b02163eff5975873",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 60,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6418918918918919,
"repo_name": "hyperoslo/django-pipeline",
"id": "6224dba85bd8b41e616714a8438e53fb1d8992bd",
"size": "148",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/tests/paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1001"
},
{
"name": "CoffeeScript",
"bytes": "52"
},
{
"name": "JavaScript",
"bytes": "140"
},
{
"name": "Python",
"bytes": "80229"
},
{
"name": "Shell",
"bytes": "4529"
}
],
"symlink_target": ""
}
|
import os
import datetime
import model
from datetime import datetime, date, time, timedelta
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
from google.appengine.api import users
class MainHandler(webapp.RequestHandler):
    """Admin-only handler used to seed the datastore with initial records.

    The seeding calls are deliberately commented out so data is not
    re-created on every request; uncomment the one you need and redeploy.
    """

    def get(self):
        if users.is_current_user_admin():
            #initUnitPrice()
            #initRefuelRecord()
            #initUnitPriceForOthers()
            # "pass" keeps the if-body syntactically valid while all seeding
            # calls are commented out; the original was an IndentationError.
            pass
def main():
    # Route /setup/ to MainHandler and hand the WSGI application to the
    # App Engine runtime.
    app = webapp.WSGIApplication([('/setup/', MainHandler)], debug=True)
    util.run_wsgi_app(app)
def initUnitPrice():
    """Seed the datastore with one 'refuel' UnitPrice record per fuel grade.

    Replaces five copy-pasted record creations with a data-driven loop; the
    records written are identical to the original's.
    """
    # (content, price) pairs for each fuel grade.
    fuel_prices = [
        ('p93', 7.89),
        ('p97', 8.3),
        ('p98', 9.0),
        ('d0', 6.0),
        ('d10', 7.0),
    ]
    for content, price in fuel_prices:
        model.UnitPrice(type = 'refuel',
                        content = content,
                        price = price).put()
def initUnitPriceForOthers():
    """Seed 'others' UnitPrice records for the non-fuel services.

    The original body was twelve copy-pasted creations forming the exact
    cross product of three services and four price points; the loop writes
    the same records in the same order ('wash10' at 10.0, ... 'other50' at
    50.0).
    """
    services = ('wash', 'waxing', 'other')
    price_points = (10.0, 15.0, 20.0, 50.0)
    for service in services:
        for price in price_points:
            # content encodes the service name plus the integer price
            model.UnitPrice(type = 'others',
                            content = '%s%d' % (service, int(price)),
                            price = price).put()
def initRefuelRecord():
    """Seed a single, fully-filled sample refuel record."""
    record_date = datetime.strptime('2011/05/06', '%Y/%m/%d').date()
    model.RefuelRecord(date = record_date,
                       odometer = 20000,
                       is_full = True,
                       fuel_type = 'p97',
                       unit_price = 8.3,
                       total_price = 300.0).put()
# Script entry point when run directly by the App Engine runtime.
if __name__ == '__main__':
    main()
|
{
"content_hash": "94554ca8125ec3aa87ea24901f716233",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 99,
"avg_line_length": 34.45,
"alnum_prop": 0.4395258829221093,
"repo_name": "wengzhiwen/Your-Vehicle-Status",
"id": "214121cb28e2be4355bac10a3738726a7484c323",
"size": "4150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22222"
}
],
"symlink_target": ""
}
|
import logging
# import numpy as np
import pystella.model.sn_eve as sneve
import pystella.rf.light_curve_plot as lcp
from pystella import phys
try:
    import matplotlib.pyplot as plt
    import matplotlib.lines as mlines

    # matplotlib is chatty at INFO level; keep only its errors.
    mpl_logger = logging.getLogger('matplotlib')
    mpl_logger.setLevel(logging.ERROR)
except ImportError as ex:
    import os
    import sys
    exc_type, exc_obj, exc_tb = sys.exc_info()
    fn = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    # Pass the values as lazy %-style arguments.  The original called
    # logging.info(exc_type, fn, lineno, ex), which treats the first value
    # as the format string and chokes on the extra arguments.
    logging.info('%s %s:%s %s', exc_type, fn, exc_tb.tb_lineno, ex)
    logging.info(' Probably, you should install module: {}'.format('matplotlib'))
    print()
    # print(ex)
    # Plotting is optional: downstream code must check for None.
    plt = None
    mlines = None
# matplotlib.use("Agg")
# import matplotlib
# matplotlib.rcParams['backend'] = "TkAgg"
# matplotlib.rcParams['backend'] = "Qt5Agg"
# matplotlib.rcParams['backend'] = "Qt4Agg"
__author__ = 'bakl'

# Marker shorthand codes mapped to descriptive names.
# NOTE(review): the values (u'circle', u'star', ...) read like human-readable
# names rather than matplotlib marker codes -- confirm how callers use them.
markers = {u'x': u'x', u'o': u'circle', u'v': u'triangle_down', u'd': u'thin_diamond',
           u'+': u'plus', u'*': u'star', u'<': u'triangle_left'}
markers_style = list(markers.keys())  # just the shorthand codes
lines_style = lcp.linestyles  # line styles reused from light_curve_plot
def get_parser():
    """Build the ArgumentParser for the PreSN-configuration tool.

    Options cover: boxcar averaging (-b), density plot (-r), abscissa
    choice (-x), structure/chem plotting toggles, input models (-i) and
    model directory (-p), element selection (-e, with `_EL` exclusion
    syntax), envelope reshaping (--reshape) and smoothing (--smooth),
    logging level (--log), plot saving (-s), verbosity (--verb) and
    writing hyd/abn files (-w).
    """
    import argparse
    from argparse import RawTextHelpFormatter
    parser = argparse.ArgumentParser(description='Process PreSN configuration.', formatter_class=RawTextHelpFormatter)
    parser.add_argument('-b', '--box',
                        required=False,
                        type=str,
                        default=None,
                        dest='box',
                        help='Make boxcar average, for example: '
                             'Delta_mass:Number:[M_uplim]:[True, if info], -b 0.5:4 . '
                             'Use key -e _ELEM [-e _Ni56]to exclude elements')
    parser.add_argument('-r', '--rho', nargs="?",
                        required=False,
                        const=True,
                        dest="rho",
                        metavar="<r OR m>",
                        help="Plot Rho-figure")
    parser.add_argument('--is_dum', nargs="?",
                        required=False,
                        const=True,
                        dest="is_dum",
                        help="Set is_dum = TRUE to parse abn-file with dum columns")
    parser.add_argument('-x',
                        required=False,
                        dest="x",
                        default='m',
                        metavar="<m OR r OR lgR OR rsun OR m OR z>",
                        help="Setup abscissa: radius or lg(R) OR mass OR zone")
    # parser.add_argument('-c', '--chem',
    #                     required=False,
    #                     type=bool,
    #                     default=True,
    #                     dest="is_chem",
    #                     help="Show chemical composition, default: True")
    parser.add_argument('--structure', dest='is_structure', action='store_true',
                        help="Show the chemical composition and rho with R/M coordinates.")
    # --chem / --no-chem form an on/off pair sharing dest 'is_chem'.
    parser.add_argument('--chem', dest='is_chem', action='store_true', help="Show chemical composition [default].")
    parser.add_argument('--no-chem', dest='is_chem', action='store_false', help="Not show chemical composition")
    parser.set_defaults(is_chem=True)
    parser.add_argument('-i', '--input', action='append', nargs=1,
                        metavar='model name', help='Key -i can be used multiple times')
    parser.add_argument('-p', '--path',
                        required=False,
                        type=str,
                        default='./',
                        dest="path",
                        help="Model directory")
    parser.add_argument('-e', '--elements',
                        required=False,
                        type=str,
                        default='H:He:C:O:Si:Fe:Ni:Ni56',
                        dest="elements",
                        help="Elements directory. \n Available: {0}".format(':'.join(sneve.eve_elements)))
    parser.add_argument('--reshape',
                        required=False,
                        type=str,
                        default=None,
                        dest="reshape",
                        help="Reshape parameters of envelope from nstart to nend to nz-zones."
                             "\n Format: --reshape NZON:AXIS:XMODE:START:END:KIND. You may use * to set default value."
                             "\n NZON: value of zones between START and END. "
                             "If < 0 Nzon is the same as Nzon of the initial model "
                             "\n AXIS: [M* OR R OR V] - reshape along mass or radius or velocity coordinate."
                             "\n XMODE: [lin OR rlog OR resize*] - linear OR reversed log10 OR add/remove points. "
                             "\n START: zone number to start reshaping. Default: 0 (first zone)"
                             "\n END: zone number to end reshaping. Default: None, (equal last zone)"
                             "\n KIND: [np OR interp1d(..kind)], kind is ('np=np.interp', 'linear', 'nearest', "
                             "'zero', 'slinear', 'quadratic, 'cubic', "
                             "'spline' = UnivariateSpline, 'gauss' = gaussian_filter1d). Default: np "
                        )
    parser.add_argument('--smooth',
                        required=False,
                        type=str,
                        default=None,
                        dest="smooth",
                        help="Smoothing density of envelope. "
                             "The smoothing procedure is used Savitzky–Golay filter with parameters. "
                             "See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html "
                             "\n Format: --smooth WINDOW_LENGTH:POLYORDER:MODE:IS_INFO."
                             "\n WINDOW_LENGTH: The length of the filter window."
                             "\n POLYORDER: The order of the polynomial used to fit the samples."
                             "POLYORDER must be less than WINDOW_LENGTH."
                             "\n MODE: Must be ‘mirror’, ‘constant’, ‘nearest’, ‘wrap’ or ‘interp’. Default: interp "
                             "\n IS_INFO: is any to print additional info. Default: False "
                             "\n Example: --smooth 5:2 OR --smooth 5:2:nearest:1"
                        )
    parser.add_argument('--log',
                        required=False,
                        type=str,
                        default='INFO',
                        dest="log",
                        help="Set logging level: "
                             "CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET"
                        )
    parser.add_argument('-s', '--save',
                        required=False,
                        type=str,
                        default=False,
                        dest="save_plot",
                        help="save plot to pdf-file, default: False")
    parser.add_argument('--verb',
                        action='store_const',
                        const=True,
                        dest="is_verb",
                        help="To enable verbose output")
    parser.add_argument('-w', '--write',
                        required=False,
                        type=str,
                        default=False,
                        dest="write_to",
                        help="To write the data to hyd-, abn-files")
    return parser
def print_masses(presn, is_el=True):
    """Print element masses of *presn* in solar masses.

    Prints per-element masses (when *is_el* is True), their sum, the
    model's total mass, and the total mass integrated from the density.

    :param presn: pre-supernova model exposing Elements, mass_tot_el(),
        m_tot and mass_tot_rho()
    :param is_el: if True, print each element's mass on its own line
    """
    m_el_tot = 0.
    # The enumerate index of the original loop was never used.
    for el in presn.Elements:
        m = presn.mass_tot_el(el) / phys.M_sun
        m_el_tot += m
        if is_el:
            print(f' {el:3}: {m:.3e}')
    print(f' M_full(Elements) = {m_el_tot:.3f}')
    print(f' M_total = {presn.m_tot / phys.M_sun:.3f}')
    # via density
    print(f' M_tot(Density) = {presn.mass_tot_rho() / phys.M_sun:.3f}')
def main():
    """CLI entry point: load envelope model(s), optionally reshape/boxcar/
    smooth them, then plot chem/rho figures or write hyd/abn files."""
    import os
    import sys
    from itertools import cycle
    import logging

    def is_level(lvl):
        # True if lvl names a standard logging level (case-insensitive).
        levels = ['CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
        return lvl.upper() in levels

    def get(arr, i, default):
        # Return arr[i] unless it is missing or the '*' placeholder.
        # Fix: the original indexed the closure variable `a` instead of the
        # `arr` parameter; it only worked because every caller passed `a`.
        if i < len(arr):
            if arr[i] != '*':
                return arr[i]
        return default

    parser = get_parser()
    args, unknownargs = parser.parse_known_args()
    eve_prev = None
    markersize = 6
    fig = None
    if args.path:
        pathDef = os.path.expanduser(args.path)
    else:
        pathDef = os.getcwd()

    # Configure logging for this module and for pystella's eve loader.
    logger = logging.getLogger(__name__)
    level = logging.INFO
    if args.log is not None:
        if is_level(args.log):
            level = logging.getLevelName(args.log.upper())
        else:
            level = logging.INFO
            # Fix: report the offending user input, not the fallback level.
            print(f"ERROR: Bad value for log: {args.log}. See help.")
    logger.setLevel(level)
    logging.basicConfig(level=level)
    logging.getLogger('pystella.model.sn_eve').setLevel(level)

    # Element selection: a leading '_' switches to exclusion mode, where
    # every listed element must be prefixed with '_' and is removed from
    # the full element list; otherwise the list is taken as-is.
    if '_' in args.elements:
        elements = list(sneve.eve_elements)
        excluded = args.elements.split(':')
        for e in excluded:
            if not e.startswith('_'):
                logger.error('For excluded mode all elements should be starts from _. Even element: ' + e)
                sys.exit(2)
            e = e[1:]
            if e not in sneve.eve_elements:
                logger.error('No such element: ' + e)
                sys.exit(2)
            elements.remove(e)
    else:
        elements = args.elements.split(':')
        for e in elements:
            if e not in sneve.eve_elements:
                logger.error('No such element: ' + e)
                sys.exit(2)

    # Set model names: from repeated -i keys, or the first bare argument.
    names = []
    if args.input:
        for nm in args.input:
            names.append(nm[0])  # remove extension
    else:
        if len(unknownargs) > 0:
            names.append(unknownargs[0])
    if len(names) == 0:
        # logger.error(" No data. Use key '-i' ")
        parser.print_help()
        sys.exit(2)

    # Cycle markers/line styles only when several curves will share axes.
    if len(names) > 1 or args.reshape is not None or args.box is not None:  # special case
        markers_cycler = cycle(markers_style)
        lines_cycler = cycle(lines_style)
    else:
        markers_cycler = cycle([None])
        lines_cycler = cycle(['-'])

    ax = None
    ax2 = None
    handles_nm = []
    for nm in names:
        logger.info("Run eve-model %s" % nm)
        path, fullname = os.path.split(nm)
        if len(path) == 0:
            path = pathDef
        # print("Run eve-model %s in %s" % (name, path))
        if fullname.endswith('hyd') or fullname.endswith('abn'):
            name = fullname.replace('.hyd', '')  # remove extension
            name = name.replace('.abn', '')  # remove extension
            try:
                # With header
                eve = sneve.load_hyd_abn(name=name, path=path, is_dm=False, is_dum=args.is_dum)
            except ValueError:
                # No header
                eve = sneve.load_hyd_abn(name=name, path=path, is_dm=False, is_dum=args.is_dum, skiprows=0)
        else:
            name = fullname.replace('.rho', '')  # remove extension
            rho_file = os.path.join(path, name + '.rho')
            eve = sneve.load_rho(rho_file)

        # Reshape
        if args.reshape is not None:
            a = args.reshape.split(':')
            nz, axis, xmode = get(a, 0, eve.nzon), get(a, 1, 'M'), get(a, 2, 'resize')  # rlog
            start, end = get(a, 3, 0), get(a, 4, None)
            kind = get(a, 5, 'np')
            start = int(start)
            # NOTE(review): `end.upper() in 'NONE'` is a substring test, so
            # e.g. 'n' or 'no' also map to None — confirm this is intended.
            if end is None or end.upper() in 'NONE':
                end = None
            if end is not None:
                end = int(end)
            nz = int(nz)
            print(f'Resize: before Nzon={eve.nzon}')
            print(f'Resize parameters: nznew= {nz} axis={axis} xmode={xmode} '
                  f'start= {start} end= {end} kind= {kind}')
            print("The element masses: before Resize")
            print_masses(eve)
            eve_reshape = eve.reshape(nz=nz, axis=axis, xmode=xmode, start=start, end=end, kind=kind)
            # eve = eve_resize
            print(f'Resize: after Nzon={eve_reshape.nzon}')
            print("The element masses: after Resize")
            print_masses(eve_reshape)
            if False:  # disabled debug check of chem_norm()
                eve_reshape.chem_norm()
                # eve = eve_resize
                print(f'After chem_norm: Nzon={eve_reshape.nzon}')
                print("The element masses: after chem_norm")
                print_masses(eve_reshape)
            # Keep the pre-reshape model so it can be over-plotted for comparison.
            eve, eve_prev = eve_reshape, eve

        # Boxcar
        if args.box is not None:
            is_info = False
            m_up = None
            s = args.box.split(':')
            dm, n = float(s[0]), int(s[1])
            print(f'Running boxcar average: dm= {dm} Msun Repeats= {n}')
            if len(s) > 2:
                m_up = float(s[2])
                print(f'The mass has up limit: m_up= {m_up}')
            if len(s) > 3:
                is_info = bool(s[3])
            print("The element masses: Before boxcar")
            print_masses(eve)
            eve_box = eve.boxcar(box_dm=dm, n=n, el_included=elements, m_uplim=m_up, is_info=is_info)
            print("The element masses: After boxcar")
            print_masses(eve_box)
            eve, eve_prev = eve_box, eve

        # Smooth
        if args.smooth is not None:
            is_info = False
            s = args.smooth.split(':')
            window_length, polyorder = int(s[0]), int(s[1])
            mode = 'interp'
            if len(s) == 3:
                mode = s[2]
            if len(s) == 4:
                is_info = True
            print(f'Running Savitzky-Golay filter to Rho: '
                  f'window_length= {window_length} Msun polyorder= {polyorder} mode= {mode}')
            print("The element masses: Before smoothing")
            print_masses(eve, is_el=is_info)
            eve_smooth = eve.smooth(window_length=window_length, polyorder=polyorder, mode=mode)
            print("The element masses: After smoothing")
            print_masses(eve_smooth, is_el=is_info)
            eve, eve_prev = eve_smooth, eve

        # Write mode: dump hyd/abn files and skip plotting for this model.
        if args.write_to:
            fname = os.path.expanduser(args.write_to)
            # fname = os.path.join(path, name)
            # f = fname + '.eve.abn'
            fname = fname.replace('.rho', '')
            f = fname + '.abn'
            if eve.write_abn(f, is_header=True):
                print(" abn has been saved to {}".format(f))
            else:
                print("Error with abn saving to {}".format(f))
            # f = fname + '.eve.hyd'
            f = fname + '.hyd'
            if eve.write_hyd(f):
                print(" hyd has been saved to {}".format(f))
            else:
                print("Error with hyd saving to {}".format(f))
            continue

        marker = next(markers_cycler)
        ls = next(lines_cycler)
        if args.is_structure:
            fig = eve.plot_structure(elements=elements, title=name, ylimChem=(1e-8, 1.))
        else:
            if args.is_chem:
                # print "Plot eve-model %s" % name
                ax = eve.plot_chem(elements=elements, ax=ax, x=args.x, ylim=(1e-8, 1.), marker=marker,
                                   markersize=markersize, leg_loc='lower center')
                if eve_prev is not None:
                    # Over-plot the pre-reshape/boxcar/smooth model, fainter.
                    eve_prev.plot_chem(elements=elements, ax=ax, x=args.x, ylim=(1e-8, 1.), marker=marker,
                                       markersize=max(1, markersize - 2), alpha=0.5, leg_loc='lower center')
                    # ax.set_title('{}: before boxcar'.format(eve_prev.Name))
            if args.rho:
                if args.is_chem:
                    # Density goes on a secondary y-axis next to the chem plot.
                    if ax2 is None:
                        ax2 = ax.twinx()
                        ax2.set_ylabel(r'$\rho, [g/cm^3]$ ')
                else:
                    ax2 = ax
                ax2 = eve.plot_rho(x=args.x, ax=ax2, ls=ls, marker=marker)
                if eve_prev is not None:
                    eve_prev.plot_rho(x=args.x, ax=ax2, ls=ls, markersize=max(1, markersize - 2), alpha=0.5)
            else:
                ls = 'None'
            handle = mlines.Line2D([], [], color='black', marker=marker,
                                   markersize=markersize, label=name, linestyle=ls)
            handles_nm.append(handle)

    if len(names) > 1:
        if ax2 is None:
            ax2 = ax.twinx()
        ax2.legend(handles=handles_nm, loc=4, fancybox=False, frameon=False)

    if args.is_verb:
        m_tot = 0.
        print('{:22s}: '.format(eve.Name))
        for n, m in eve.mass_tot_el().items():
            m_tot += m
            print(f'{n} {m/phys.M_sun:.3e}')
        print(' m_tot= {:6.3f} m_tot(El)= {:6.3f} '.format(eve.m_tot/phys.M_sun, m_tot/phys.M_sun))
        # eve.chem_norm()
        # print('{:22s} after Norm '.format(eve.Name))
        # for n, m in eve.mass_tot_el().items():
        #     m_tot += m
        #     print(f'{n} {m/phys.M_sun:.3e}')
        # print(' m_tot= {:6.3f} m_tot(El)= {:6.3f} '.format(eve.m_tot/phys.M_sun, m_tot/phys.M_sun))

    if not args.write_to:
        if args.save_plot:
            fsave = os.path.expanduser(args.save_plot)
            # if args.rho:
            #     fsave = os.path.join(os.path.expanduser('~/'), 'rho_%s.pdf' % names[0])
            # else:
            #     fsave = os.path.join(os.path.expanduser('~/'), 'chem_%s.pdf' % names[0])
            logger.info(" Save plot to %s " % fsave)
            if fig is None:
                fig = ax.get_figure()
            fig.savefig(fsave, bbox_inches='tight')
        else:
            plt.show()
# Standard entry point: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
|
{
"content_hash": "e4163cb8e756e1a1177f7eea9f995a88",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 120,
"avg_line_length": 40.45842696629214,
"alnum_prop": 0.48472561652966006,
"repo_name": "baklanovp/pystella",
"id": "27475c5cac0b70d2fb54bb6599246755f8dbe565",
"size": "18050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eve.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "378"
},
{
"name": "Jupyter Notebook",
"bytes": "32344"
},
{
"name": "Perl",
"bytes": "8638492"
},
{
"name": "Python",
"bytes": "965333"
},
{
"name": "ReScript",
"bytes": "5700682"
},
{
"name": "Roff",
"bytes": "19642"
}
],
"symlink_target": ""
}
|
from CMash import MinHash as MH
import numpy as np
import scipy as sp
from scipy.io import loadmat
import pandas as pd
import os
import timeit
from collections import Counter
# Post-process CMash classification output: load the MinHash sketches of the
# organisms that passed the first pass, zero out non-unique k-mers in the
# saved hit matrices, recompute containment indices, filter by threshold
# and write the final CSV. Configuration is by editing the path variables
# below (the last assignment of each name wins).
# First, read in the sketches of just the appropriate critters
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/test.csv'
#training_base_name = '/nfs1/Koslicki_Lab/koslickd/RepoPhlAn-7-24-18/out/microbes_24072018/fna/'
#training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/Test.h5'
#hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/test_hit_matrix.npz'
# Very small test
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RealSmallTest/Test.csv'
#training_base_name = '/home/dkoslicki/Data/MiCOPMinHash/Test/RealSmallTest/'
#training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RealSmallTest/Test.h5'
#hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RealSmallTest/Test_hit_matrix.npz'
# real test
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess.csv'
#training_base_name = '/nfs1/Koslicki_Lab/koslickd/RepoPhlAn-7-24-18/out/microbes_24072018/fna/'
#training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/AllBacteria.hd5'
#hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess_hit_matrix.npz'
#results_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess_finished.csv'
# real medium
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified_postprocess.csv'
#training_base_name = '/nfs1/Koslicki_Lab/koslickd/RepoPhlAn-7-24-18/out/microbes_24072018/fna/'
#training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/AllBacteria.hd5'
#hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified_postprocess_hit_matrix.npz'
#results_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified_postprocess_finished.csv'
# real high
cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified_postprocess.csv'
training_base_name = '/nfs1/Koslicki_Lab/koslickd/RepoPhlAn-7-24-18/out/microbes_24072018/fna/'
training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/AllBacteria.hd5'
hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified_postprocess_hit_matrix.npz'
results_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified_postprocess_finished.csv'
# Nathan's database
# NOTE(review): this section overrides the "real high" paths above.
cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60.csv'
training_base_name = '/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/organism_files/'
training_hdf_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/micopdb-n1000-30-60.h5'
hit_matrices_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_hit_matrix.npz'
results_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_postprocess.csv'
t0 = timeit.default_timer()
# minimum containment (at the k-size selected by location_of_thresh) to keep a genome
coverage_threshold = 0.0062
sort_key = 'k=60'
location_of_thresh = -1
# read in the file and sort as needed
df = pd.read_csv(cmash_out_file, index_col=0)
#df = df[df['k=60'] > 0.01].sort_values('k=60', ascending=False)  # for the ones that had -c 0, add a threshold for sanity sake
names_passed_thresh = list(df.index)
names_passed_thresh_with_path = []
for name in names_passed_thresh:
	names_passed_thresh_with_path.append(training_base_name + name)
# load only the sketches of the genomes that passed the first-pass threshold
CEs = MH.import_multiple_from_single_hdf5(training_hdf_file, import_list=names_passed_thresh_with_path)
training_file_names = [c.input_file_name for c in CEs]
# import the hit matrices
hit_matrices_dict = loadmat(hit_matrices_file)
# now, for each one of the sketches, look for unique k-mer in it, set non-unique to zero
k_range = sorted([int(i.split('=')[1]) for i in df.keys()])
# Make the hit matrices dense
hit_matrices_dense_dict = dict()
for k_size in k_range:
	hit_matrices_dense_dict['k=%d' % k_size] = hit_matrices_dict['k=%d' % k_size].todense()
hit_matrices_dict = hit_matrices_dense_dict
# Count every k-mer prefix over all sketches and k-sizes. Keys of different
# lengths cannot collide, so one shared dict works for all k-sizes.
all_kmers_with_counts = dict()
is_unique_kmer = set()
is_unique_kmer_per_ksize = dict()
for k_size in k_range:
	is_unique_kmer_per_ksize[k_size] = set()
	for i in range(len(CEs)):
		for big_kmer in CEs[i]._kmers:
			kmer = big_kmer[:k_size]
			if kmer in all_kmers_with_counts:
				all_kmers_with_counts[kmer] += 1
			else:
				all_kmers_with_counts[kmer] = 1
# A k-mer seen exactly once over all sketches is unique (informative).
for kmer in all_kmers_with_counts.keys():
	if all_kmers_with_counts[kmer] == 1:
		k_size = len(kmer)
		is_unique_kmer_per_ksize[k_size].add(kmer)
		is_unique_kmer.add(kmer)
num_unique = dict()
for i in range(len(CEs)):
	for k_size in k_range:
		current_kmers = [k[:k_size] for k in CEs[i]._kmers]
		current_kmers_set = set(current_kmers)
		non_unique = set()
		for kmer in current_kmers:
			if kmer not in is_unique_kmer_per_ksize[k_size]:
				non_unique.add(kmer)
		to_zero_indicies = [ind for ind, kmer in enumerate(current_kmers) if kmer in non_unique]
		hit_matrices_dict['k=%d' % k_size][i, to_zero_indicies] = 0  # set these to zero since they show up in other sketches (so not informative)
		num_unique[i, k_range.index(k_size)] = len(current_kmers_set) - len(non_unique)  # keep track of the size of the unique k-mers
# sum the modified hit matrices to get the size of the intersection
containment_indices = np.zeros((len(names_passed_thresh_with_path), len(k_range)))  # TODO: could make this thing sparse, or do the filtering for above threshold here
for k_size_loc in range(len(k_range)):
	k_size = k_range[k_size_loc]
	containment_indices[:, k_size_loc] = (hit_matrices_dict['k=%d' % k_size].sum(axis=1).ravel())  #/float(num_hashes))
# then normalize by the number of unique k-mers (to get the containment index)
for k_size_loc in range(len(k_range)):
	k_size = k_range[k_size_loc]
	for hash_loc in np.where(containment_indices[:, k_size_loc])[0]:  # find the genomes with non-zero containment
		unique_kmers = set()
		for kmer in CEs[hash_loc]._kmers:
			unique_kmers.add(kmer[:k_size])  # find the unique k-mers
		containment_indices[hash_loc, k_size_loc] /= float(len(unique_kmers))  # TODO: this doesn't seem like the right way to normalize, but apparently it is!
		#containment_indices[hash_loc, k_size_loc] /= float(num_unique[hash_loc, k_size_loc])  # divide by the unique num of k-mers
t1 = timeit.default_timer()
print("Time to reduce hit matrix: %f" % (t1 - t0))
# Assemble the per-k-size containment columns into a DataFrame indexed by genome.
results = dict()
for k_size_loc in range(len(k_range)):
	ksize = k_range[k_size_loc]
	key = 'k=%d' % ksize
	results[key] = containment_indices[:, k_size_loc]
df = pd.DataFrame(results, map(os.path.basename, names_passed_thresh_with_path))
df = df.reindex(labels=['k=' + str(k_size) for k_size in k_range], axis=1)  # sort columns in ascending order
# NOTE(review): sort_key set near the top is overwritten here by design.
sort_key = 'k=%d' % k_range[location_of_thresh]
max_key = 'k=%d' % k_range[-1]
filtered_results = df[df[sort_key] > coverage_threshold].sort_values(max_key, ascending=False)  # only select those where the highest k-mer size's count is above the threshold
print(filtered_results)
print(filtered_results.shape)
t1 = timeit.default_timer()
print("Time to execute everything: %f" % (t1 - t0))
filtered_results.to_csv(results_file, index=True, encoding='utf-8')
|
{
"content_hash": "3023c04262ce5ddef52178ab662ff30d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 175,
"avg_line_length": 47.5921052631579,
"alnum_prop": 0.7380425767210396,
"repo_name": "dkoslicki/CMash",
"id": "acb42ad8b34be97652b84b2b5b629e765db94d79",
"size": "7466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ideas/PostProcessHitMatrix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "227407"
},
{
"name": "Shell",
"bytes": "3364"
}
],
"symlink_target": ""
}
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from xml.dom.minidom import Node
class BaseScriptNode(BasicSvgNode):
    """SVG node whose payload lives in a single CDATA child section.

    The node enforces at most one child, and that child must be a CDATA
    section; use setData()/getData() to manipulate its content.
    """

    def __init__(self, ownerDoc, tagName):
        BasicSvgNode.__init__(self, ownerDoc, tagName)
        self.allowAllSvgNodesAsChildNodes = True

    def setData(self, data):
        """Store *data* in the node's CDATA section, creating it if needed.

        A None *data* is ignored. The first existing CDATA child is
        interpreted as the node that contains the data.
        """
        if data is None:
            return
        # Reuse the first CDATA child if one already exists (iterating an
        # empty childNodes list is a no-op, so no length check is needed).
        for child in self.childNodes:
            if child.nodeType == Node.CDATA_SECTION_NODE:
                child.data = data
                return
        # No CDATA child yet: create one and attach it.
        self.appendChild(self.ownerDocument.createCDATASection(data))

    def getData(self):
        """Return the content of the first CDATA child, or None if absent."""
        for child in self.childNodes:
            if child.nodeType == Node.CDATA_SECTION_NODE:
                return child.data
        return None

    def appendChild(self, node):
        """Append *node*; only one CDATA child is ever allowed.

        Raises Exception when *node* is not a CDATA section or when a
        child is already present (same contract as before).
        """
        if node.nodeType != Node.CDATA_SECTION_NODE:
            raise Exception('only CDATA nodes can be added')
        if len(self.childNodes) != 0:
            raise Exception('only one CDATA node can be present, use the getData and setData to change the data')
        BasicSvgNode.appendChild(self, node)
|
{
"content_hash": "c7753a5f14a15e1344fe2987fe5ee250",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 117,
"avg_line_length": 37.92,
"alnum_prop": 0.5617088607594937,
"repo_name": "danrg/RGT-tool",
"id": "82be2ad85987afde315c34785969bc461fe01179",
"size": "1896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/baseScriptNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import time
# blinking function
def blink(pin, duration=1):
    """Blink the LED on *pin* once: HIGH then LOW.

    :param pin: board pin number previously set up as an output
    :param duration: seconds to hold each of the on and off states
        (default 1, matching the original fixed delay)
    """
    GPIO.output(pin, GPIO.HIGH)
    time.sleep(duration)
    GPIO.output(pin, GPIO.LOW)
    time.sleep(duration)
# to use Raspberry Pi board pin numbers
GPIO.setmode(GPIO.BOARD)
# set up GPIO output channel
GPIO.setup(11, GPIO.OUT)
# blink GPIO17 (board pin 11) 50 times
for i in range(0, 50):
    blink(11)
# release the GPIO channels used by this script
GPIO.cleanup()
|
{
"content_hash": "c1a52e1dbfe3e8699181399998a47eff",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6717557251908397,
"repo_name": "songbird175/ReFilament",
"id": "0797120cdb43b503dfb80293610171073be900d9",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LEDblink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "8317"
},
{
"name": "CSS",
"bytes": "6606"
},
{
"name": "HTML",
"bytes": "81241"
},
{
"name": "Jupyter Notebook",
"bytes": "26812"
},
{
"name": "Mathematica",
"bytes": "43876"
},
{
"name": "PHP",
"bytes": "2282"
},
{
"name": "Python",
"bytes": "16920"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.