hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c471283593726a928d0d7ec9a44612cdf93d9bf | 12,006 | py | Python | saleor/graphql/payment/mutations.py | frediian/test-saleor | 6bc7d153bbedb2b6cfe9b125325c785a08ac32bd | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/payment/mutations.py | frediian/test-saleor | 6bc7d153bbedb2b6cfe9b125325c785a08ac32bd | [
"CC-BY-4.0"
] | 22 | 2021-08-02T04:47:57.000Z | 2022-03-28T04:41:58.000Z | saleor/graphql/payment/mutations.py | frediian/test-saleor | 6bc7d153bbedb2b6cfe9b125325c785a08ac32bd | [
"CC-BY-4.0"
] | null | null | null | import graphene
from django.core.exceptions import ValidationError
from ...channel.models import Channel
from ...checkout.calculations import calculate_checkout_total_with_gift_cards
from ...checkout.checkout_cleaner import clean_billing_address, clean_checkout_shipping
from ...checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ...checkout.utils import cancel_active_payments
from ...core.permissions import OrderPermissions
from ...core.utils import get_client_ip
from ...core.utils.url import validate_storefront_url
from ...payment import PaymentError, gateway
from ...payment.error_codes import PaymentErrorCode
from ...payment.utils import create_payment, is_currency_supported
from ..account.i18n import I18nMixin
from ..checkout.types import Checkout
from ..core.mutations import BaseMutation
from ..core.scalars import PositiveDecimal
from ..core.types import common as common_types
from .types import Payment, PaymentInitialized
class PaymentInput(graphene.InputObjectType):
    # Input payload for the ``checkoutPaymentCreate`` mutation.  Every
    # ``description`` string below is published verbatim in the public
    # GraphQL schema, so rewording them is an API-visible change.
    gateway = graphene.Field(
        graphene.String,
        description="A gateway to use with that payment.",
        required=True,
    )
    # Optional: only gateways that tokenize billing data client-side send it;
    # CheckoutPaymentCreate.validate_token enforces it per gateway/channel.
    token = graphene.String(
        required=False,
        description=(
            "Client-side generated payment token, representing customer's "
            "billing data in a secure manner."
        ),
    )
    # Optional: when omitted, the server falls back to the checkout total.
    amount = PositiveDecimal(
        required=False,
        description=(
            "Total amount of the transaction, including "
            "all taxes and discounts. If no amount is provided, "
            "the checkout total will be used."
        ),
    )
    # Optional: redirect target for flows needing additional actions
    # (e.g. 3-D Secure).
    return_url = graphene.String(
        required=False,
        description=(
            "URL of a storefront view where user should be redirected after "
            "requiring additional actions. Payment with additional actions will not be "
            "finished if this field is not provided."
        ),
    )
class CheckoutPaymentCreate(BaseMutation, I18nMixin):
    # Mutation that creates a payment for a checkout, cancelling any
    # previously active payments on that checkout first.
    checkout = graphene.Field(Checkout, description="Related checkout object.")
    payment = graphene.Field(Payment, description="A newly created payment.")
    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        input = PaymentInput(
            description="Data required to create a new payment.", required=True
        )
    class Meta:
        description = "Create a new payment for given checkout."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def clean_payment_amount(cls, info, checkout_total, amount):
        """Reject any ``amount`` differing from the checkout's gross total.

        Partial payments are not supported, so the client-provided amount
        must exactly equal ``checkout_total.gross.amount``.
        """
        if amount != checkout_total.gross.amount:
            raise ValidationError(
                {
                    "amount": ValidationError(
                        "Partial payments are not allowed, amount should be "
                        "equal checkout's total.",
                        # NOTE(review): other validators in this class pass
                        # ``code`` as ``.value``; this one passes the raw enum
                        # member -- confirm both serialize to the same code.
                        code=PaymentErrorCode.PARTIAL_PAYMENT_NOT_ALLOWED,
                    )
                }
            )
    @classmethod
    def validate_gateway(cls, manager, gateway_id, currency):
        """Raise if the chosen gateway does not support the checkout currency."""
        if not is_currency_supported(currency, gateway_id, manager):
            raise ValidationError(
                {
                    "gateway": ValidationError(
                        f"The gateway {gateway_id} does not support checkout currency.",
                        code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
                    )
                }
            )
    @classmethod
    def validate_token(cls, manager, gateway: str, input_data: dict, channel_slug: str):
        """Raise if the gateway requires a client token and none was given."""
        token = input_data.get("token")
        is_required = manager.token_is_required_as_payment_input(gateway, channel_slug)
        if not token and is_required:
            raise ValidationError(
                {
                    "token": ValidationError(
                        f"Token is required for {gateway}.",
                        code=PaymentErrorCode.REQUIRED.value,
                    ),
                }
            )
    @classmethod
    def validate_return_url(cls, input_data):
        """Validate the optional ``return_url`` against allowed storefront URLs."""
        return_url = input_data.get("return_url")
        if not return_url:
            return
        try:
            validate_storefront_url(return_url)
        except ValidationError as error:
            # NOTE(review): the input field is named ``return_url`` but the
            # error is reported under ``redirect_url``; also, Django ignores
            # the ``code`` kwarg when the message is a dict -- confirm both
            # are intentional.
            raise ValidationError(
                {"redirect_url": error}, code=PaymentErrorCode.INVALID
            )
    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, **data):
        """Validate the input, then create a payment for the checkout.

        Order matters here: gateway/url/token validation runs before totals
        are computed, and active payments are cancelled only after all
        validation passes, just before the new payment is created.
        """
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        data = data["input"]
        gateway = data["gateway"]
        manager = info.context.plugins
        cls.validate_gateway(manager, gateway, checkout.currency)
        cls.validate_return_url(data)
        lines = fetch_checkout_lines(checkout)
        checkout_info = fetch_checkout_info(
            checkout, lines, info.context.discounts, manager
        )
        cls.validate_token(
            manager, gateway, data, channel_slug=checkout_info.channel.slug
        )
        address = (
            checkout.shipping_address or checkout.billing_address
        )  # FIXME: check which address we need here
        checkout_total = calculate_checkout_total_with_gift_cards(
            manager=manager,
            checkout_info=checkout_info,
            lines=lines,
            address=address,
            discounts=info.context.discounts,
        )
        # Default to the full checkout total; clean_payment_amount then
        # rejects any explicit amount that differs from it.
        amount = data.get("amount", checkout_total.gross.amount)
        clean_checkout_shipping(checkout_info, lines, PaymentErrorCode)
        clean_billing_address(checkout_info, PaymentErrorCode)
        cls.clean_payment_amount(info, checkout_total, amount)
        extra_data = {
            "customer_user_agent": info.context.META.get("HTTP_USER_AGENT"),
        }
        # Only one active payment per checkout is allowed.
        cancel_active_payments(checkout)
        payment = create_payment(
            gateway=gateway,
            payment_token=data.get("token", ""),
            total=amount,
            currency=checkout.currency,
            email=checkout.get_customer_email(),
            extra_data=extra_data,
            # FIXME this is not a customer IP address. It is a client storefront ip
            customer_ip_address=get_client_ip(info.context),
            checkout=checkout,
            return_url=data.get("return_url"),
        )
        return CheckoutPaymentCreate(payment=payment, checkout=checkout)
class PaymentCapture(BaseMutation):
    # Mutation that captures previously authorized funds on a payment.
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
        amount = PositiveDecimal(description="Transaction amount.")
    class Meta:
        description = "Captures the authorized payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        """Capture ``amount`` (or the full amount) via the payment's gateway."""
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # The payment is attached either to an order or to a checkout;
        # the gateway call needs that object's channel slug.
        if payment.order:
            channel_slug = payment.order.channel.slug
        else:
            channel_slug = payment.checkout.channel.slug
        try:
            gateway.capture(
                payment, info.context.plugins, amount=amount, channel_slug=channel_slug
            )
            # Reload so the response reflects the transaction just recorded.
            payment.refresh_from_db()
        except PaymentError as payment_error:
            raise ValidationError(
                str(payment_error), code=PaymentErrorCode.PAYMENT_ERROR
            )
        return PaymentCapture(payment=payment)
class PaymentRefund(PaymentCapture):
    # Inherits the output field and ``Arguments`` (payment_id, amount) from
    # PaymentCapture; only the gateway operation differs.
    class Meta:
        description = "Refunds the captured payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        """Refund ``amount`` (or the full amount) via the payment's gateway."""
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # Channel comes from the owning order when present, else the checkout.
        if payment.order:
            channel_slug = payment.order.channel.slug
        else:
            channel_slug = payment.checkout.channel.slug
        try:
            gateway.refund(
                payment, info.context.plugins, amount=amount, channel_slug=channel_slug
            )
            # Reload so the response reflects the transaction just recorded.
            payment.refresh_from_db()
        except PaymentError as payment_error:
            raise ValidationError(
                str(payment_error), code=PaymentErrorCode.PAYMENT_ERROR
            )
        return PaymentRefund(payment=payment)
class PaymentVoid(BaseMutation):
    # Mutation that voids (cancels) an authorization before capture.
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
    class Meta:
        description = "Voids the authorized payment."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id):
        """Void the payment's authorization via its gateway."""
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # Channel comes from the owning order when present, else the checkout.
        if payment.order:
            channel_slug = payment.order.channel.slug
        else:
            channel_slug = payment.checkout.channel.slug
        try:
            gateway.void(payment, info.context.plugins, channel_slug=channel_slug)
            # Reload so the response reflects the transaction just recorded.
            payment.refresh_from_db()
        except PaymentError as payment_error:
            raise ValidationError(
                str(payment_error), code=PaymentErrorCode.PAYMENT_ERROR
            )
        return PaymentVoid(payment=payment)
class PaymentInitialize(BaseMutation):
    # Runs a gateway-side "initialize payment" step (e.g. creating a payment
    # session) before an actual payment object exists.
    initialized_payment = graphene.Field(PaymentInitialized, required=False)
    class Arguments:
        gateway = graphene.String(
            description="A gateway name used to initialize the payment.",
            required=True,
        )
        channel = graphene.String(
            description="Slug of a channel for which the data should be returned.",
        )
        payment_data = graphene.JSONString(
            required=False,
            description=(
                "Client-side generated data required to initialize the payment."
            ),
        )
    class Meta:
        description = "Initializes payment process when it is required by gateway."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def validate_channel(cls, channel_slug):
        """Return the channel for ``channel_slug``.

        Raises:
            ValidationError: with NOT_FOUND when no such channel exists, or
                CHANNEL_INACTIVE when the channel is deactivated.
        """
        try:
            channel = Channel.objects.get(slug=channel_slug)
        except Channel.DoesNotExist:
            raise ValidationError(
                {
                    "channel": ValidationError(
                        f"Channel with '{channel_slug}' slug does not exist.",
                        code=PaymentErrorCode.NOT_FOUND.value,
                    )
                }
            )
        if not channel.is_active:
            raise ValidationError(
                {
                    "channel": ValidationError(
                        f"Channel with '{channel_slug}' is inactive.",
                        code=PaymentErrorCode.CHANNEL_INACTIVE.value,
                    )
                }
            )
        return channel
    @classmethod
    def perform_mutation(cls, _root, info, gateway, channel=None, payment_data=None):
        """Initialize the payment on the plugin/gateway side.

        ``channel`` and ``payment_data`` are optional in the GraphQL schema,
        so they default to ``None`` here: graphene omits optional arguments
        that the client did not supply, and the previous required-positional
        signature raised a TypeError in that case.  A ``None`` channel still
        fails cleanly in ``validate_channel`` with a NOT_FOUND error.
        """
        cls.validate_channel(channel_slug=channel)
        try:
            response = info.context.plugins.initialize_payment(
                gateway, payment_data, channel_slug=channel
            )
        except PaymentError as e:
            raise ValidationError(
                {
                    "payment_data": ValidationError(
                        str(e), code=PaymentErrorCode.INVALID.value
                    )
                }
            )
        return PaymentInitialize(initialized_payment=response)
| 36.381818 | 88 | 0.623938 | import graphene
from django.core.exceptions import ValidationError
from ...channel.models import Channel
from ...checkout.calculations import calculate_checkout_total_with_gift_cards
from ...checkout.checkout_cleaner import clean_billing_address, clean_checkout_shipping
from ...checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ...checkout.utils import cancel_active_payments
from ...core.permissions import OrderPermissions
from ...core.utils import get_client_ip
from ...core.utils.url import validate_storefront_url
from ...payment import PaymentError, gateway
from ...payment.error_codes import PaymentErrorCode
from ...payment.utils import create_payment, is_currency_supported
from ..account.i18n import I18nMixin
from ..checkout.types import Checkout
from ..core.mutations import BaseMutation
from ..core.scalars import PositiveDecimal
from ..core.types import common as common_types
from .types import Payment, PaymentInitialized
class PaymentInput(graphene.InputObjectType):
gateway = graphene.Field(
graphene.String,
description="A gateway to use with that payment.",
required=True,
)
token = graphene.String(
required=False,
description=(
"Client-side generated payment token, representing customer's "
"billing data in a secure manner."
),
)
amount = PositiveDecimal(
required=False,
description=(
"Total amount of the transaction, including "
"all taxes and discounts. If no amount is provided, "
"the checkout total will be used."
),
)
return_url = graphene.String(
required=False,
description=(
"URL of a storefront view where user should be redirected after "
"requiring additional actions. Payment with additional actions will not be "
"finished if this field is not provided."
),
)
class CheckoutPaymentCreate(BaseMutation, I18nMixin):
checkout = graphene.Field(Checkout, description="Related checkout object.")
payment = graphene.Field(Payment, description="A newly created payment.")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID.", required=True)
input = PaymentInput(
description="Data required to create a new payment.", required=True
)
class Meta:
description = "Create a new payment for given checkout."
error_type_class = common_types.PaymentError
error_type_field = "payment_errors"
@classmethod
def clean_payment_amount(cls, info, checkout_total, amount):
if amount != checkout_total.gross.amount:
raise ValidationError(
{
"amount": ValidationError(
"Partial payments are not allowed, amount should be "
"equal checkout's total.",
code=PaymentErrorCode.PARTIAL_PAYMENT_NOT_ALLOWED,
)
}
)
@classmethod
def validate_gateway(cls, manager, gateway_id, currency):
if not is_currency_supported(currency, gateway_id, manager):
raise ValidationError(
{
"gateway": ValidationError(
f"The gateway {gateway_id} does not support checkout currency.",
code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
)
}
)
@classmethod
def validate_token(cls, manager, gateway: str, input_data: dict, channel_slug: str):
token = input_data.get("token")
is_required = manager.token_is_required_as_payment_input(gateway, channel_slug)
if not token and is_required:
raise ValidationError(
{
"token": ValidationError(
f"Token is required for {gateway}.",
code=PaymentErrorCode.REQUIRED.value,
),
}
)
@classmethod
def validate_return_url(cls, input_data):
return_url = input_data.get("return_url")
if not return_url:
return
try:
validate_storefront_url(return_url)
except ValidationError as error:
raise ValidationError(
{"redirect_url": error}, code=PaymentErrorCode.INVALID
)
@classmethod
def perform_mutation(cls, _root, info, checkout_id, **data):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
data = data["input"]
gateway = data["gateway"]
manager = info.context.plugins
cls.validate_gateway(manager, gateway, checkout.currency)
cls.validate_return_url(data)
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines, info.context.discounts, manager
)
cls.validate_token(
manager, gateway, data, channel_slug=checkout_info.channel.slug
)
address = (
checkout.shipping_address or checkout.billing_address
)
checkout_total = calculate_checkout_total_with_gift_cards(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=info.context.discounts,
)
amount = data.get("amount", checkout_total.gross.amount)
clean_checkout_shipping(checkout_info, lines, PaymentErrorCode)
clean_billing_address(checkout_info, PaymentErrorCode)
cls.clean_payment_amount(info, checkout_total, amount)
extra_data = {
"customer_user_agent": info.context.META.get("HTTP_USER_AGENT"),
}
cancel_active_payments(checkout)
payment = create_payment(
gateway=gateway,
payment_token=data.get("token", ""),
total=amount,
currency=checkout.currency,
email=checkout.get_customer_email(),
extra_data=extra_data,
customer_ip_address=get_client_ip(info.context),
checkout=checkout,
return_url=data.get("return_url"),
)
return CheckoutPaymentCreate(payment=payment, checkout=checkout)
class PaymentCapture(BaseMutation):
payment = graphene.Field(Payment, description="Updated payment.")
class Arguments:
payment_id = graphene.ID(required=True, description="Payment ID.")
amount = PositiveDecimal(description="Transaction amount.")
class Meta:
description = "Captures the authorized payment amount."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = common_types.PaymentError
error_type_field = "payment_errors"
@classmethod
def perform_mutation(cls, _root, info, payment_id, amount=None):
payment = cls.get_node_or_error(
info, payment_id, field="payment_id", only_type=Payment
)
channel_slug = (
payment.order.channel.slug
if payment.order
else payment.checkout.channel.slug
)
try:
gateway.capture(
payment, info.context.plugins, amount=amount, channel_slug=channel_slug
)
payment.refresh_from_db()
except PaymentError as e:
raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
return PaymentCapture(payment=payment)
class PaymentRefund(PaymentCapture):
class Meta:
description = "Refunds the captured payment amount."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = common_types.PaymentError
error_type_field = "payment_errors"
@classmethod
def perform_mutation(cls, _root, info, payment_id, amount=None):
payment = cls.get_node_or_error(
info, payment_id, field="payment_id", only_type=Payment
)
channel_slug = (
payment.order.channel.slug
if payment.order
else payment.checkout.channel.slug
)
try:
gateway.refund(
payment, info.context.plugins, amount=amount, channel_slug=channel_slug
)
payment.refresh_from_db()
except PaymentError as e:
raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
return PaymentRefund(payment=payment)
class PaymentVoid(BaseMutation):
payment = graphene.Field(Payment, description="Updated payment.")
class Arguments:
payment_id = graphene.ID(required=True, description="Payment ID.")
class Meta:
description = "Voids the authorized payment."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = common_types.PaymentError
error_type_field = "payment_errors"
@classmethod
def perform_mutation(cls, _root, info, payment_id):
payment = cls.get_node_or_error(
info, payment_id, field="payment_id", only_type=Payment
)
channel_slug = (
payment.order.channel.slug
if payment.order
else payment.checkout.channel.slug
)
try:
gateway.void(payment, info.context.plugins, channel_slug=channel_slug)
payment.refresh_from_db()
except PaymentError as e:
raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
return PaymentVoid(payment=payment)
class PaymentInitialize(BaseMutation):
initialized_payment = graphene.Field(PaymentInitialized, required=False)
class Arguments:
gateway = graphene.String(
description="A gateway name used to initialize the payment.",
required=True,
)
channel = graphene.String(
description="Slug of a channel for which the data should be returned.",
)
payment_data = graphene.JSONString(
required=False,
description=(
"Client-side generated data required to initialize the payment."
),
)
class Meta:
description = "Initializes payment process when it is required by gateway."
error_type_class = common_types.PaymentError
error_type_field = "payment_errors"
@classmethod
def validate_channel(cls, channel_slug):
try:
channel = Channel.objects.get(slug=channel_slug)
except Channel.DoesNotExist:
raise ValidationError(
{
"channel": ValidationError(
f"Channel with '{channel_slug}' slug does not exist.",
code=PaymentErrorCode.NOT_FOUND.value,
)
}
)
if not channel.is_active:
raise ValidationError(
{
"channel": ValidationError(
f"Channel with '{channel_slug}' is inactive.",
code=PaymentErrorCode.CHANNEL_INACTIVE.value,
)
}
)
return channel
@classmethod
def perform_mutation(cls, _root, info, gateway, channel, payment_data):
cls.validate_channel(channel_slug=channel)
try:
response = info.context.plugins.initialize_payment(
gateway, payment_data, channel_slug=channel
)
except PaymentError as e:
raise ValidationError(
{
"payment_data": ValidationError(
str(e), code=PaymentErrorCode.INVALID.value
)
}
)
return PaymentInitialize(initialized_payment=response)
| true | true |
1c4712b10065ae738ea39947160be15df662fbae | 4,778 | py | Python | cinder/api/contrib/extended_snapshot_attributes.py | cloudbau/cinder | 3179f2f42ae940a08b910e326a809556689864d8 | [
"Apache-2.0"
] | null | null | null | cinder/api/contrib/extended_snapshot_attributes.py | cloudbau/cinder | 3179f2f42ae940a08b910e326a809556689864d8 | [
"Apache-2.0"
] | null | null | null | cinder/api/contrib/extended_snapshot_attributes.py | cloudbau/cinder | 3179f2f42ae940a08b910e326a809556689864d8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Snapshot Attributes API extension."""
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer(
'volume',
'extended_snapshot_attributes')
class ExtendedSnapshotAttributesController(wsgi.Controller):
    # Controller extension that appends project_id/progress attributes to
    # snapshot 'show' and 'detail' responses when the caller is authorized.
    def __init__(self, *args, **kwargs):
        super(ExtendedSnapshotAttributesController, self).__init__(*args,
                                                                   **kwargs)
        self.volume_api = volume.API()
    def _get_snapshots(self, context):
        """Return all snapshots visible in ``context``, keyed by snapshot id."""
        snapshots = self.volume_api.get_all_snapshots(context)
        rval = dict((snapshot['id'], snapshot) for snapshot in snapshots)
        return rval
    def _extend_snapshot(self, context, snapshot, data):
        """Copy the extra attributes from ``data`` onto the response dict.

        Keys are namespaced as '<alias>:<attr>' to match what the XML
        templates below select.
        """
        for attr in ['project_id', 'progress']:
            key = "%s:%s" % (Extended_snapshot_attributes.alias, attr)
            snapshot[key] = data[attr]
    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-snapshot 'show' response."""
        context = req.environ['cinder.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate())
            try:
                snapshot = self.volume_api.get_snapshot(context, id)
            except exception.NotFound:
                # NOTE(review): ``_`` is presumably the gettext builtin
                # installed at service startup -- it is not imported here;
                # confirm against the service entry point.
                explanation = _("Snapshot not found.")
                raise exc.HTTPNotFound(explanation=explanation)
            self._extend_snapshot(context, resp_obj.obj['snapshot'], snapshot)
    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend every snapshot in a 'detail' listing response."""
        context = req.environ['cinder.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate())
            snapshots = list(resp_obj.obj.get('snapshots', []))
            db_snapshots = self._get_snapshots(context)
            for snapshot_object in snapshots:
                try:
                    snapshot_data = db_snapshots[snapshot_object['id']]
                except KeyError:
                    # Snapshot vanished between the listing and our lookup;
                    # leave it without the extended attributes.
                    continue
                self._extend_snapshot(context, snapshot_object, snapshot_data)
class Extended_snapshot_attributes(extensions.ExtensionDescriptor):
    """Extended SnapshotAttributes support.

    API-extension descriptor: registers the controller extension above and
    supplies the alias/namespace used for the extra attribute keys.
    """
    name = "ExtendedSnapshotAttributes"
    alias = "os-extended-snapshot-attributes"
    namespace = ("http://docs.openstack.org/volume/ext/"
                 "extended_snapshot_attributes/api/v1")
    updated = "2012-06-19T00:00:00+00:00"
    def get_controller_extensions(self):
        """Return the controller extension hooked onto the snapshots resource."""
        controller = ExtendedSnapshotAttributesController()
        extension = extensions.ControllerExtension(self, 'snapshots',
                                                   controller)
        return [extension]
def make_snapshot(elem):
    """Register the extension's extra attributes on a snapshot XML element.

    Each attribute is emitted in the extension namespace and pulls its value
    from the '<alias>:<attribute>' key of the selected snapshot dict.
    """
    namespace = Extended_snapshot_attributes.namespace
    alias = Extended_snapshot_attributes.alias
    for attribute in ('project_id', 'progress'):
        elem.set('{%s}%s' % (namespace, attribute),
                 '%s:%s' % (alias, attribute))
class ExtendedSnapshotAttributeTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the slave XML template for a single-snapshot response."""
        snapshot_elem = xmlutil.TemplateElement('snapshot', selector='snapshot')
        make_snapshot(snapshot_elem)
        nsmap = {Extended_snapshot_attributes.alias:
                 Extended_snapshot_attributes.namespace}
        return xmlutil.SlaveTemplate(snapshot_elem, 1, nsmap=nsmap)
class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the slave XML template for a snapshot listing response."""
        root = xmlutil.TemplateElement('snapshots')
        snapshot_elem = xmlutil.SubTemplateElement(root, 'snapshot',
                                                   selector='snapshots')
        make_snapshot(snapshot_elem)
        nsmap = {Extended_snapshot_attributes.alias:
                 Extended_snapshot_attributes.namespace}
        return xmlutil.SlaveTemplate(root, 1, nsmap=nsmap)
| 38.224 | 78 | 0.670783 |
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer(
'volume',
'extended_snapshot_attributes')
class ExtendedSnapshotAttributesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedSnapshotAttributesController, self).__init__(*args,
**kwargs)
self.volume_api = volume.API()
def _get_snapshots(self, context):
snapshots = self.volume_api.get_all_snapshots(context)
rval = dict((snapshot['id'], snapshot) for snapshot in snapshots)
return rval
def _extend_snapshot(self, context, snapshot, data):
for attr in ['project_id', 'progress']:
key = "%s:%s" % (Extended_snapshot_attributes.alias, attr)
snapshot[key] = data[attr]
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['cinder.context']
if authorize(context):
resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate())
try:
snapshot = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
explanation = _("Snapshot not found.")
raise exc.HTTPNotFound(explanation=explanation)
self._extend_snapshot(context, resp_obj.obj['snapshot'], snapshot)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['cinder.context']
if authorize(context):
resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate())
snapshots = list(resp_obj.obj.get('snapshots', []))
db_snapshots = self._get_snapshots(context)
for snapshot_object in snapshots:
try:
snapshot_data = db_snapshots[snapshot_object['id']]
except KeyError:
continue
self._extend_snapshot(context, snapshot_object, snapshot_data)
class Extended_snapshot_attributes(extensions.ExtensionDescriptor):
name = "ExtendedSnapshotAttributes"
alias = "os-extended-snapshot-attributes"
namespace = ("http://docs.openstack.org/volume/ext/"
"extended_snapshot_attributes/api/v1")
updated = "2012-06-19T00:00:00+00:00"
def get_controller_extensions(self):
controller = ExtendedSnapshotAttributesController()
extension = extensions.ControllerExtension(self, 'snapshots',
controller)
return [extension]
def make_snapshot(elem):
elem.set('{%s}project_id' % Extended_snapshot_attributes.namespace,
'%s:project_id' % Extended_snapshot_attributes.alias)
elem.set('{%s}progress' % Extended_snapshot_attributes.namespace,
'%s:progress' % Extended_snapshot_attributes.alias)
class ExtendedSnapshotAttributeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
alias = Extended_snapshot_attributes.alias
namespace = Extended_snapshot_attributes.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
alias = Extended_snapshot_attributes.alias
namespace = Extended_snapshot_attributes.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
| true | true |
1c47132929ce011d4b8ad6a8bcdf763d68fc9700 | 7,415 | py | Python | yapftests/split_penalty_test.py | hugovk/yapf | 831fb3d38d19f2bd1518426af68f22f21f5400b0 | [
"Apache-2.0"
] | null | null | null | yapftests/split_penalty_test.py | hugovk/yapf | 831fb3d38d19f2bd1518426af68f22f21f5400b0 | [
"Apache-2.0"
] | null | null | null | yapftests/split_penalty_test.py | hugovk/yapf | 831fb3d38d19f2bd1518426af68f22f21f5400b0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.split_penalty."""
import sys
import textwrap
import unittest
from lib2to3 import pytree
from yapf.pytree import pytree_utils
from yapf.pytree import pytree_visitor
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapftests import yapf_test_helper
UNBREAKABLE = split_penalty.UNBREAKABLE
VERY_STRONGLY_CONNECTED = split_penalty.VERY_STRONGLY_CONNECTED
DOTTED_NAME = split_penalty.DOTTED_NAME
STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED
class SplitPenaltyTest(yapf_test_helper.YAPFTest):
  @classmethod
  def setUpClass(cls):
    # Penalty values depend on the active global style; pin the default
    # yapf style once for every test in this class.
    style.SetGlobalStyle(style.CreateYapfStyle())
  def _ParseAndComputePenalties(self, code, dumptree=False):
    """Parse ``code`` and annotate its pytree with split penalties.

    Arguments:
      code: code to parse, as a string.
      dumptree: if True, the parsed pytree (after penalty assignment) is
        dumped to stderr. Useful for debugging.

    Returns:
      The annotated parse tree.
    """
    tree = pytree_utils.ParseCodeToTree(code)
    split_penalty.ComputeSplitPenalties(tree)
    if dumptree:
      pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
    return tree
def _CheckPenalties(self, tree, list_of_expected):
"""Check that the tokens in the tree have the correct penalties.
Args:
tree: the pytree.
list_of_expected: list of (name, penalty) pairs. Non-semantic tokens are
filtered out from the expected values.
"""
def FlattenRec(tree):
if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS:
return []
if isinstance(tree, pytree.Leaf):
return [(tree.value,
pytree_utils.GetNodeAnnotation(
tree, pytree_utils.Annotation.SPLIT_PENALTY))]
nodes = []
for node in tree.children:
nodes += FlattenRec(node)
return nodes
self.assertEqual(list_of_expected, FlattenRec(tree))
  def testUnbreakable(self):
    """Header tokens of def/class/lambda and dotted names must never split."""
    # Test function definitions.
    code = textwrap.dedent(r"""
          def foo(x):
            pass
          """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('def', None),
        ('foo', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('x', None),
        (')', STRONGLY_CONNECTED),
        (':', UNBREAKABLE),
        ('pass', None),
    ])
    # Test function definition with trailing comment.
    code = textwrap.dedent(r"""
          def foo(x):  # trailing comment
            pass
          """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('def', None),
        ('foo', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('x', None),
        (')', STRONGLY_CONNECTED),
        (':', UNBREAKABLE),
        ('pass', None),
    ])
    # Test class definitions.
    code = textwrap.dedent(r"""
          class A:
            pass
          class B(A):
            pass
          """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('class', None),
        ('A', UNBREAKABLE),
        (':', UNBREAKABLE),
        ('pass', None),
        ('class', None),
        ('B', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('A', None),
        (')', None),
        (':', UNBREAKABLE),
        ('pass', None),
    ])
    # Test lambda definitions.
    code = textwrap.dedent(r"""
          lambda a, b: None
          """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('lambda', None),
        ('a', VERY_STRONGLY_CONNECTED),
        (',', VERY_STRONGLY_CONNECTED),
        ('b', VERY_STRONGLY_CONNECTED),
        (':', VERY_STRONGLY_CONNECTED),
        ('None', VERY_STRONGLY_CONNECTED),
    ])
    # Test dotted names.
    code = textwrap.dedent(r"""
          import a.b.c
          """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('import', None),
        ('a', None),
        ('.', UNBREAKABLE),
        ('b', UNBREAKABLE),
        ('.', UNBREAKABLE),
        ('c', UNBREAKABLE),
    ])
def testStronglyConnected(self):
    """Penalties that strongly discourage (but still permit) a split."""
    # Test dictionary keys.
    code = textwrap.dedent(r"""
      a = {
          'x': 42,
          y(lambda a: 23): 37,
      }
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('a', None),
        ('=', None),
        ('{', None),
        ("'x'", None),
        (':', STRONGLY_CONNECTED),
        ('42', None),
        (',', None),
        ('y', None),
        ('(', UNBREAKABLE),
        ('lambda', STRONGLY_CONNECTED),
        ('a', VERY_STRONGLY_CONNECTED),
        (':', VERY_STRONGLY_CONNECTED),
        ('23', VERY_STRONGLY_CONNECTED),
        (')', VERY_STRONGLY_CONNECTED),
        (':', STRONGLY_CONNECTED),
        ('37', None),
        (',', None),
        ('}', None),
    ])
    # Test list comprehension.
    code = textwrap.dedent(r"""
      [a for a in foo if a.x == 37]
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('[', None),
        ('a', None),
        ('for', 0),
        ('a', STRONGLY_CONNECTED),
        ('in', STRONGLY_CONNECTED),
        ('foo', STRONGLY_CONNECTED),
        ('if', 0),
        ('a', STRONGLY_CONNECTED),
        ('.', VERY_STRONGLY_CONNECTED),
        ('x', DOTTED_NAME),
        ('==', STRONGLY_CONNECTED),
        ('37', STRONGLY_CONNECTED),
        (']', None),
    ])
def testFuncCalls(self):
    """Penalties inside plain, dotted, and generator-argument calls."""
    code = 'foo(1, 2, 3)\n'
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('foo', None),
        ('(', UNBREAKABLE),
        ('1', None),
        (',', UNBREAKABLE),
        ('2', None),
        (',', UNBREAKABLE),
        ('3', None),
        (')', VERY_STRONGLY_CONNECTED),
    ])
    # Now a method call, which has more than one trailer
    code = 'foo.bar.baz(1, 2, 3)\n'
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('foo', None),
        ('.', VERY_STRONGLY_CONNECTED),
        ('bar', DOTTED_NAME),
        ('.', VERY_STRONGLY_CONNECTED),
        ('baz', DOTTED_NAME),
        ('(', STRONGLY_CONNECTED),
        ('1', None),
        (',', UNBREAKABLE),
        ('2', None),
        (',', UNBREAKABLE),
        ('3', None),
        (')', VERY_STRONGLY_CONNECTED),
    ])
    # Test single generator argument.
    code = 'max(i for i in xrange(10))\n'
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('max', None),
        ('(', UNBREAKABLE),
        ('i', 0),
        ('for', 0),
        ('i', STRONGLY_CONNECTED),
        ('in', STRONGLY_CONNECTED),
        ('xrange', STRONGLY_CONNECTED),
        ('(', UNBREAKABLE),
        ('10', STRONGLY_CONNECTED),
        (')', VERY_STRONGLY_CONNECTED),
        (')', VERY_STRONGLY_CONNECTED),
    ])
if __name__ == '__main__':
  # Allow running this test module directly with the stock unittest runner.
  unittest.main()
| 27.771536 | 79 | 0.578692 |
import sys
import textwrap
import unittest
from lib2to3 import pytree
from yapf.pytree import pytree_utils
from yapf.pytree import pytree_visitor
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapftests import yapf_test_helper
UNBREAKABLE = split_penalty.UNBREAKABLE
VERY_STRONGLY_CONNECTED = split_penalty.VERY_STRONGLY_CONNECTED
DOTTED_NAME = split_penalty.DOTTED_NAME
STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED
class SplitPenaltyTest(yapf_test_helper.YAPFTest):
@classmethod
def setUpClass(cls):
style.SetGlobalStyle(style.CreateYapfStyle())
def _ParseAndComputePenalties(self, code, dumptree=False):
tree = pytree_utils.ParseCodeToTree(code)
split_penalty.ComputeSplitPenalties(tree)
if dumptree:
pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
return tree
def _CheckPenalties(self, tree, list_of_expected):
def FlattenRec(tree):
if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS:
return []
if isinstance(tree, pytree.Leaf):
return [(tree.value,
pytree_utils.GetNodeAnnotation(
tree, pytree_utils.Annotation.SPLIT_PENALTY))]
nodes = []
for node in tree.children:
nodes += FlattenRec(node)
return nodes
self.assertEqual(list_of_expected, FlattenRec(tree))
def testUnbreakable(self):
code = textwrap.dedent(r"""
def foo(x):
pass
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('def', None),
('foo', UNBREAKABLE),
('(', UNBREAKABLE),
('x', None),
(')', STRONGLY_CONNECTED),
(':', UNBREAKABLE),
('pass', None),
])
code = textwrap.dedent(r"""
def foo(x): # trailing comment
pass
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('def', None),
('foo', UNBREAKABLE),
('(', UNBREAKABLE),
('x', None),
(')', STRONGLY_CONNECTED),
(':', UNBREAKABLE),
('pass', None),
])
code = textwrap.dedent(r"""
class A:
pass
class B(A):
pass
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('class', None),
('A', UNBREAKABLE),
(':', UNBREAKABLE),
('pass', None),
('class', None),
('B', UNBREAKABLE),
('(', UNBREAKABLE),
('A', None),
(')', None),
(':', UNBREAKABLE),
('pass', None),
])
code = textwrap.dedent(r"""
lambda a, b: None
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('lambda', None),
('a', VERY_STRONGLY_CONNECTED),
(',', VERY_STRONGLY_CONNECTED),
('b', VERY_STRONGLY_CONNECTED),
(':', VERY_STRONGLY_CONNECTED),
('None', VERY_STRONGLY_CONNECTED),
])
code = textwrap.dedent(r"""
import a.b.c
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('import', None),
('a', None),
('.', UNBREAKABLE),
('b', UNBREAKABLE),
('.', UNBREAKABLE),
('c', UNBREAKABLE),
])
def testStronglyConnected(self):
code = textwrap.dedent(r"""
a = {
'x': 42,
y(lambda a: 23): 37,
}
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('a', None),
('=', None),
('{', None),
("'x'", None),
(':', STRONGLY_CONNECTED),
('42', None),
(',', None),
('y', None),
('(', UNBREAKABLE),
('lambda', STRONGLY_CONNECTED),
('a', VERY_STRONGLY_CONNECTED),
(':', VERY_STRONGLY_CONNECTED),
('23', VERY_STRONGLY_CONNECTED),
(')', VERY_STRONGLY_CONNECTED),
(':', STRONGLY_CONNECTED),
('37', None),
(',', None),
('}', None),
])
code = textwrap.dedent(r"""
[a for a in foo if a.x == 37]
""")
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('[', None),
('a', None),
('for', 0),
('a', STRONGLY_CONNECTED),
('in', STRONGLY_CONNECTED),
('foo', STRONGLY_CONNECTED),
('if', 0),
('a', STRONGLY_CONNECTED),
('.', VERY_STRONGLY_CONNECTED),
('x', DOTTED_NAME),
('==', STRONGLY_CONNECTED),
('37', STRONGLY_CONNECTED),
(']', None),
])
def testFuncCalls(self):
code = 'foo(1, 2, 3)\n'
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('foo', None),
('(', UNBREAKABLE),
('1', None),
(',', UNBREAKABLE),
('2', None),
(',', UNBREAKABLE),
('3', None),
(')', VERY_STRONGLY_CONNECTED),
])
code = 'foo.bar.baz(1, 2, 3)\n'
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('foo', None),
('.', VERY_STRONGLY_CONNECTED),
('bar', DOTTED_NAME),
('.', VERY_STRONGLY_CONNECTED),
('baz', DOTTED_NAME),
('(', STRONGLY_CONNECTED),
('1', None),
(',', UNBREAKABLE),
('2', None),
(',', UNBREAKABLE),
('3', None),
(')', VERY_STRONGLY_CONNECTED),
])
code = 'max(i for i in xrange(10))\n'
tree = self._ParseAndComputePenalties(code)
self._CheckPenalties(tree, [
('max', None),
('(', UNBREAKABLE),
('i', 0),
('for', 0),
('i', STRONGLY_CONNECTED),
('in', STRONGLY_CONNECTED),
('xrange', STRONGLY_CONNECTED),
('(', UNBREAKABLE),
('10', STRONGLY_CONNECTED),
(')', VERY_STRONGLY_CONNECTED),
(')', VERY_STRONGLY_CONNECTED),
])
if __name__ == '__main__':
unittest.main()
| true | true |
1c47143c0de39dc172c34feae1dc157a3f004a9e | 1,024 | py | Python | demo_python/socket/server2.py | coderlongren/Scrapy | c0678fcf55c16b5f74e08158761b79da910172e4 | [
"MIT"
] | 5 | 2017-12-03T11:35:56.000Z | 2018-03-22T06:49:03.000Z | demo_python/socket/server2.py | coderlongren/Scrapy | c0678fcf55c16b5f74e08158761b79da910172e4 | [
"MIT"
] | null | null | null | demo_python/socket/server2.py | coderlongren/Scrapy | c0678fcf55c16b5f74e08158761b79da910172e4 | [
"MIT"
] | null | null | null | #coding:utf-8
from SocketServer import TCPServer, BaseRequestHandler
import traceback
class MyBaseRequestHandlerr(BaseRequestHandler):
"""
#从BaseRequestHandler继承,并重写handle方法
"""
def handle(self):
#循环监听(读取)来自客户端的数据
while True:
#当客户端主动断开连接时,self.recv(1024)会抛出异常
try:
#一次读取1024字节,并去除两端的空白字符(包括空格,TAB,\r,\n)
data = self.request.recv(1024).strip()
#self.client_address是客户端的连接(host, port)的元组
print "receive from (%r):%r" % (self.client_address, data)
#转换成大写后写回(发生到)客户端
self.request.sendall(data.upper()+'\n')
except:
traceback.print_exc()
break
if __name__ == "__main__":
    # Try it with: telnet 127.0.0.1 9999  (stale: the server below binds 8080)
    host = ""
    port = 8080 # listening port
    addr = (host, port)
    # Build the TCPServer with our handler class.
    server = TCPServer(addr, MyBaseRequestHandlerr)
    # Start serving; blocks forever.
    server.serve_forever()
| 26.25641 | 76 | 0.545898 |
from SocketServer import TCPServer, BaseRequestHandler
import traceback
class MyBaseRequestHandlerr(BaseRequestHandler):
"""
#从BaseRequestHandler继承,并重写handle方法
"""
def handle(self):
while True:
try:
data = self.request.recv(1024).strip()
print "receive from (%r):%r" % (self.client_address, data)
self.request.sendall(data.upper()+'\n')
except:
traceback.print_exc()
break
if __name__ == "__main__":
host = ""
port = 8080
addr = (host, port)
server = TCPServer(addr, MyBaseRequestHandlerr)
server.serve_forever()
| false | true |
1c4714491879cc91b48a7f0c09b4c8ab6d87e93d | 3,659 | py | Python | evaluate.py | Prettyfinger/Twostream_reID | 8e340e0c03bd248b04ff1b48398ca99b6aeaa508 | [
"MIT"
] | 6 | 2019-05-17T03:40:59.000Z | 2021-04-09T11:01:54.000Z | evaluate.py | Prettyfinger/Twostream_reID | 8e340e0c03bd248b04ff1b48398ca99b6aeaa508 | [
"MIT"
] | null | null | null | evaluate.py | Prettyfinger/Twostream_reID | 8e340e0c03bd248b04ff1b48398ca99b6aeaa508 | [
"MIT"
] | 2 | 2019-09-12T06:19:05.000Z | 2020-06-12T11:34:12.000Z | import scipy.io
import torch
import numpy as np
#import time
import os
#######################################################################
# Evaluate
def evaluate(qf, ql, qc, gf, gl, gc):
    """Rank the gallery for one query and score it (Market-1501 protocol).

    Args:
        qf: query feature vector.
        ql, qc: query label and camera id.
        gf: gallery feature matrix (one row per image).
        gl, gc: gallery label and camera arrays.

    Returns:
        (average precision, CMC tensor) from compute_mAP.
    """
    scores = np.dot(gf, qf)
    # Gallery indices sorted by descending similarity to the query.
    order = np.argsort(scores)[::-1]
    same_identity = np.argwhere(gl == ql)
    same_camera = np.argwhere(gc == qc)
    # Positives: same person captured by a different camera.
    good_index = np.setdiff1d(same_identity, same_camera, assume_unique=True)
    # Junk: same person under the same camera, plus label -1 (distractors).
    junk_index = np.append(np.intersect1d(same_identity, same_camera),
                           np.argwhere(gl == -1))
    return compute_mAP(order, good_index, junk_index)


def compute_mAP(index, good_index, junk_index):
    """Compute (average precision, CMC curve) for one ranked gallery list."""
    ap = 0
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size == 0:
        # No valid positives for this query: flag it with CMC[0] == -1.
        cmc[0] = -1
        return ap, cmc
    # Drop junk entries from the ranking before scoring.
    keep = np.isin(index, junk_index, invert=True)
    ranked = index[keep]
    # Positions (ranks) at which the good matches appear.
    hits = np.argwhere(np.isin(ranked, good_index)).flatten()
    cmc[hits[0]:] = 1
    ngood = len(good_index)
    for i in range(ngood):
        d_recall = 1.0 / ngood
        precision = (i + 1) * 1.0 / (hits[i] + 1)
        # Trapezoidal interpolation between consecutive recall points.
        old_precision = i * 1.0 / hits[i] if hits[i] != 0 else 1.0
        ap = ap + d_recall * (old_precision + precision) / 2
    return ap, cmc
######################################################################
# Load pre-extracted features and run the Market-1501 evaluation:
# single-query always, multi-query only if multi_query.mat exists.
result = scipy.io.loadmat('twostream_Market1501_SeSC.mat')
query_feature = result['query_f']
query_cam = result['query_cam'][0]
query_label = result['query_label'][0]
gallery_feature = result['gallery_f']
gallery_cam = result['gallery_cam'][0]
gallery_label = result['gallery_label'][0]
multi = os.path.isfile('multi_query.mat')
if multi:
    m_result = scipy.io.loadmat('multi_query.mat')
    mquery_feature = m_result['mquery_f']
    mquery_cam = m_result['mquery_cam'][0]
    mquery_label = m_result['mquery_label'][0]
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
#print(query_label)
# Accumulate CMC and AP over all queries; queries without valid positives
# (signalled by CMC_tmp[0] == -1) are skipped.
for i in range(len(query_label)):
    ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
    if CMC_tmp[0]==-1:
        continue
    CMC = CMC + CMC_tmp
    ap += ap_tmp
    # print(i, CMC_tmp[0])
CMC = CMC.float()
CMC = CMC/len(query_label) #average CMC
print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
# multiple-query
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
if multi:
    # Average the features of all multi-query images with the same
    # (label, camera) pair, then evaluate that mean feature.
    for i in range(len(query_label)):
        mquery_index1 = np.argwhere(mquery_label==query_label[i])
        mquery_index2 = np.argwhere(mquery_cam==query_cam[i])
        mquery_index = np.intersect1d(mquery_index1, mquery_index2)
        mq = np.mean(mquery_feature[mquery_index,:], axis=0)
        ap_tmp, CMC_tmp = evaluate(mq,query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
        if CMC_tmp[0]==-1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
        #print(i, CMC_tmp[0])
    CMC = CMC.float()
    CMC = CMC/len(query_label) #average CMC
    print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
| 33.87963 | 120 | 0.636239 | import scipy.io
import torch
import numpy as np
import os
| true | true |
1c4714b6c1ba2640b2df051cf15a452b139f7891 | 1,251 | py | Python | extra/unused/kml_wesn_filter.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 10 | 2018-01-02T18:03:07.000Z | 2022-01-25T05:36:21.000Z | extra/unused/kml_wesn_filter.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 1 | 2020-04-14T16:57:15.000Z | 2020-05-15T16:10:17.000Z | extra/unused/kml_wesn_filter.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 4 | 2016-08-12T15:06:48.000Z | 2019-11-27T05:33:50.000Z | #!/usr/bin/python
# Stream a KML file to stdout, dropping every <Placemark> that contains at
# least one coordinate outside the bounding box given on the command line.
# Usage: kml_wesn_filter.py input.kml WEST EAST SOUTH NORTH
import re;
import sys;
name=sys.argv[1];
w=sys.argv[2];
e=sys.argv[3];
s=sys.argv[4];
n=sys.argv[5];
coords_str="";
kml="";
temp="";
coords="";
outside=False;
infile=open(name,"r");
# Copy lines through until a <Placemark> opens, then buffer the whole
# Placemark and decide whether to keep it.
while 1:
 line=infile.readline();
 if not line:
  break;
 if line.find("<Placemark") < 0:
  kml=kml+line;
 else:
  temp=temp+line;
  # Buffer the Placemark into `temp`, collecting coordinate text in `coords`.
  while 1:
   line=infile.readline();
   if not line:
    break;
   temp=temp+line;
   if line.find("</Placemark") > -1:
    # Placemark finished: keep it only if no vertex fell outside the box.
    if not outside:
     kml=kml+temp;
    temp="";
    outside=False;
    break;
   if line.find("<coordinates") > -1:
    coords=coords+line+" ";
    # Coordinates may span several lines; read until the closing tag.
    if line.find("</coordinates") < 0:
     while 1:
      line=infile.readline();
      if not line:
       break;
      temp=temp+line;
      coords=coords+line.strip()+" ";
      if line.find("</coordinates") > -1:
       break;
    # Each entry is "lon,lat[,alt]"; 13 == len("<coordinates>").
    coords_list=coords[coords.find("<coordinates>")+13:coords.find("</coordinates>")].split();
    for coord in coords_list:
     coord=coord.replace(","," ");
     elements=coord.split();
     lon=float(elements[0]);
     lat=float(elements[1]);
     if lon < float(w) or lon > float(e) or lat < float(s) or lat > float(n):
      outside=True;
      break;
    coords="";
infile.close();
print(kml);
exit();
| 18.397059 | 94 | 0.581135 |
import re;
import sys;
name=sys.argv[1];
w=sys.argv[2];
e=sys.argv[3];
s=sys.argv[4];
n=sys.argv[5];
coords_str="";
kml="";
temp="";
coords="";
outside=False;
infile=open(name,"r");
while 1:
line=infile.readline();
if not line:
break;
if line.find("<Placemark") < 0:
kml=kml+line;
else:
temp=temp+line;
while 1:
line=infile.readline();
if not line:
break;
temp=temp+line;
if line.find("</Placemark") > -1:
if not outside:
kml=kml+temp;
temp="";
outside=False;
break;
if line.find("<coordinates") > -1:
coords=coords+line+" ";
if line.find("</coordinates") < 0:
while 1:
line=infile.readline();
if not line:
break;
temp=temp+line;
coords=coords+line.strip()+" ";
if line.find("</coordinates") > -1:
break;
coords_list=coords[coords.find("<coordinates>")+13:coords.find("</coordinates>")].split();
for coord in coords_list:
coord=coord.replace(","," ");
elements=coord.split();
lon=float(elements[0]);
lat=float(elements[1]);
if lon < float(w) or lon > float(e) or lat < float(s) or lat > float(n):
outside=True;
break;
coords="";
infile.close();
print(kml);
exit();
| true | true |
1c471503d318cedfa20271abbfe08577cd1b9640 | 15,148 | py | Python | onnx2caffe/_operators.py | troyliu0105/onnx2caffe | d369e774ed216f1c9cbc2f3610c8c1b9c3364f97 | [
"MIT"
] | null | null | null | onnx2caffe/_operators.py | troyliu0105/onnx2caffe | d369e774ed216f1c9cbc2f3610c8c1b9c3364f97 | [
"MIT"
] | null | null | null | onnx2caffe/_operators.py | troyliu0105/onnx2caffe | d369e774ed216f1c9cbc2f3610c8c1b9c3364f97 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from caffe import params as P
from MyCaffe import Function as myf
def _compare(a, b, encoding="utf8"): # type: (Text, Text, Text) -> bool
if isinstance(a, bytes):
a = a.decode(encoding)
if isinstance(b, bytes):
b = b.decode(encoding)
return a == b
def make_input(input):
    """Build a caffe Input layer from a (name, onnx_type, shape) triple.

    The blob name doubles as the layer name, and the ONNX shape tuple is
    turned into the layer's input_param dim list.
    """
    blob_name = input[0]
    dims = list(input[2])
    return myf("Input", blob_name, [], [blob_name],
               input_param=dict(shape=dict(dim=dims)))
def _convert_conv(node, graph, err):
    """Convert an ONNX Conv node into a caffe Convolution layer.

    Registers the output's channel count in graph.channel_dims. Returns the
    layer, or None after reporting a missing weight initializer. Only
    pads[0]/pads[1] and dilations[0] are forwarded (symmetric padding and a
    single dilation are assumed, matching this backend's caffe params).
    """
    weight_name = node.inputs[1]
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    if weight_name not in node.input_tensors:
        # Bail out like _convert_gemm does; continuing would crash on
        # W.shape below.
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        return
    W = node.input_tensors[weight_name]
    # A third input means the convolution carries a bias term.
    bias_flag = len(node.inputs) > 2
    dilations = node.attrs.get("dilations", [1, 1])
    groups = node.attrs.get("group", 1)
    kernel_shape = node.attrs["kernel_shape"]
    pads = node.attrs.get("pads", [0, 0, 0, 0])
    strides = node.attrs["strides"]
    layer = myf("Convolution", node_name, [input_name], [output_name],
                kernel_h=kernel_shape[0], kernel_w=kernel_shape[1],
                stride_h=strides[0], stride_w=strides[1], group=groups,
                pad_h=pads[0], pad_w=pads[1],
                num_output=W.shape[0], dilation=dilations[0], bias_term=bias_flag)
    graph.channel_dims[output_name] = W.shape[0]
    return layer
def _convert_relu(node, graph, err):
    """Map an ONNX Relu node onto a caffe ReLU layer.

    The layer is emitted in-place when the node's input and output blob
    names coincide; channel bookkeeping is copied from input to output.
    """
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    layer = myf("ReLU", str(node.name), [bottom], [top],
                in_place=(bottom == top))
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_sigmoid(node, graph, err):
    """Map an ONNX Sigmoid node onto a caffe Sigmoid layer.

    Emitted in-place when input and output blob names coincide; the
    channel count is inherited from the input.
    """
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    layer = myf("Sigmoid", str(node.name), [bottom], [top],
                in_place=(bottom == top))
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_BatchNorm(node, graph, err):
    """Convert ONNX BatchNormalization into a caffe BatchNorm + Scale pair.

    Returns (batchnorm_layer, scale_layer); the Scale layer always runs
    in-place on the BatchNorm output and carries the learned gamma/beta.
    """
    epsilon = node.attrs.get("epsilon", 1e-5)
    # Index the four parameter tensors up front; a missing initializer
    # raises KeyError here rather than failing later.
    scale = node.input_tensors[node.inputs[1]]
    bias = node.input_tensors[node.inputs[2]]
    mean = node.input_tensors[node.inputs[3]]
    var = node.input_tensors[node.inputs[4]]
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    inplace = bottom == top
    bn_layer = myf("BatchNorm", node.name + "_bn", [bottom], [top],
                   eps=epsilon, use_global_stats=True, in_place=inplace)
    scale_layer = myf("Scale", node.name, [top], [top],
                      in_place=True, bias_term=True)
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return bn_layer, scale_layer
def _convert_Add(node, graph, err):
    """Map ONNX Add onto caffe layers.

    Broadcast add (broadcast attribute == 1, exactly two inputs) becomes a
    Flatten + Bias pair; the element-wise case becomes an Eltwise SUM
    layer. The output inherits the first input's channel count. (A dead
    max_dim scan over graph.channel_dims was removed; its result was
    unused and it could only raise KeyError.)
    """
    input_name_list = [str(i) for i in node.inputs]
    output_name = str(node.outputs[0])
    node_name = node.name
    if node.attrs.get('broadcast', 0) == 1:
        input_node_number = len(input_name_list)
        if input_node_number != 2:
            return err.unsupported_op_configuration(node, "Broadcast Add must has 2 input, not {}".format(
                input_node_number))
        axis = node.attrs['axis']
        # Flatten the broadcast operand, then add it as a Bias along `axis`.
        flat_layer = myf("Flatten", node_name + '_flat', [input_name_list[1]], [output_name + '_flat'])
        layer = myf("Bias", node_name, [input_name_list[0], output_name + '_flat'], [output_name], axis=axis)
        graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
        return flat_layer, layer
    layer = myf("Eltwise", node_name, input_name_list, [output_name], operation=P.Eltwise.SUM)
    graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
    return layer
def _convert_Mul(node, graph, err):
    """Map ONNX Mul onto caffe layers.

    Broadcast multiply (broadcast attribute == 1, exactly two inputs)
    becomes a Flatten + Scale (bias_term=False) pair; the element-wise
    case becomes an Eltwise PROD layer.
    """
    bottoms = [str(i) for i in node.inputs]
    top = str(node.outputs[0])
    node_name = node.name
    if node.attrs.get('broadcast', 0) == 1:
        if len(bottoms) != 2:
            return err.unsupported_op_configuration(node, "Broadcast Mul must has 2 input, not {}".format(
                len(bottoms)))
        axis = node.attrs['axis']
        flat_layer = myf("Flatten", node_name + '_flat', [bottoms[1]], [top + '_flat'])
        layer = myf("Scale", node_name, [bottoms[0], top + '_flat'], [top],
                    bias_term=False, axis=axis)
        graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
        return flat_layer, layer
    layer = myf("Eltwise", node_name, bottoms, [top], operation=P.Eltwise.PROD)
    graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
    return layer
def _convert_Reshape(node, graph, err):
    """Map an ONNX Reshape node onto a caffe layer.

    A 2-D target shape becomes Flatten; a 4-D target shape becomes
    Reshape; any other rank is rejected. The target shape comes from the
    'shape' attribute (single-input form) or from the second input's
    initializer tensor.
    """
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    if len(node.inputs) == 1:
        shape = tuple(node.attrs.get('shape', ()))
    else:
        shape = tuple(node.input_tensors[node.inputs[1]])
    inplace = input_name == output_name
    if len(shape) == 2:
        layer = myf("Flatten", node_name, [input_name], [output_name], in_place=inplace)
        graph.channel_dims[output_name] = shape[1]
        return layer
    elif len(shape) == 4:
        graph.channel_dims[output_name] = shape[1]
        layer = myf("Reshape", node_name, [input_name], [output_name],
                    reshape_param=dict(shape=dict(dim=list(shape))))
        return layer
    else:
        # Fixed typo in the user-facing message ("dimention" -> "dimension").
        return err.unsupported_op_configuration(node, "Reshape dimension number shall be 2 or 4")
def _convert_Flatten(node, graph, err):
    """Map an ONNX Flatten node onto a caffe Flatten layer.

    Emitted in-place when the input and output blob names coincide.
    Channel bookkeeping is not updated for this op.
    """
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    return myf("Flatten", node.name, [bottom], [top],
               in_place=(bottom == top))
def _convert_pool(node, graph, err):
    """Map ONNX (Global)MaxPool / (Global)AveragePool onto caffe Pooling.

    Global variants only set pool type and global_pooling; windowed
    variants also forward kernel, stride and pad (symmetric, from
    pads[0]/pads[1]).
    """
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    op = node.op_type
    if op.endswith("MaxPool"):
        pool_type = P.Pooling.MAX
    elif op.endswith("AveragePool"):
        pool_type = P.Pooling.AVE
    else:
        return err.unsupported_op_configuration(node, "Unsupported pool type")
    is_global = int(op.startswith("Global"))
    if is_global:
        pooling_param = dict(pool=pool_type, global_pooling=is_global)
    else:
        kernel_shape = node.attrs["kernel_shape"]
        strides = node.attrs.get('strides', [1, 1])
        pads = node.attrs.get('pads', [0, 0, 0, 0])
        pooling_param = dict(pool=pool_type,
                             kernel_h=kernel_shape[0],
                             kernel_w=kernel_shape[1],
                             stride_h=strides[0],
                             stride_w=strides[1],
                             pad_h=pads[0],
                             pad_w=pads[1],
                             global_pooling=is_global)
    layer = myf("Pooling", node.name, [bottom], [top],
                pooling_param=pooling_param)
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_dropout(node, graph, err):
    """Map an ONNX Dropout node onto a caffe Dropout layer.

    The ONNX 'ratio' attribute (default 0.5) becomes caffe's
    dropout_ratio; channels pass through unchanged.
    """
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    layer = myf("Dropout", node.name, [bottom], [top],
                dropout_ratio=node.attrs.get('ratio', 0.5))
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_gemm(node, graph, err):
    """Map an ONNX Gemm node onto a caffe InnerProduct layer.

    Only the fully-connected form is supported: transB == 1, broadcast
    enabled when present, 2-D weights, and an optional 1-D bias whose
    length matches the output dimension.
    """
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    weight_name = node.inputs[1]
    if weight_name not in node.input_tensors:
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        return
    W = node.input_tensors[weight_name]
    # transB defaults to 0 in the ONNX spec; the previous
    # node.attrs["transB"] raised KeyError when the attribute was omitted.
    if ("broadcast" in node.attrs and node.attrs["broadcast"] != 1) or node.attrs.get("transB", 0) != 1:
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
    b = None
    if len(node.inputs) > 2:
        b = node.input_tensors[node.inputs[2]]
    if len(W.shape) != 2 or (b is not None and len(b.shape) != 1):
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
    bias_flag = b is not None
    if b is not None and W.shape[0] != b.shape[0]:
        return err.unsupported_op_configuration(node,
                                                "Gemm is supported only for inner_product layer")
    layer = myf("InnerProduct", node_name, [input_name], [output_name],
                num_output=W.shape[0], bias_term=bias_flag)
    graph.channel_dims[output_name] = W.shape[0]
    return layer
def _convert_upsample(node, graph, err):
    """Map an ONNX Upsample node onto a caffe Deconvolution layer.

    "bilinear" mode becomes a grouped deconvolution initialized with the
    "bilinear_upsampling" weight filler; any other mode becomes a plain
    factor x factor grouped deconvolution. The integer scale is taken
    from the node's "height_scale" attribute.
    """
    factor = int(node.attrs["height_scale"])
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    channels = graph.channel_dims[bottom]
    mode = node.attrs["mode"]
    # https://github.com/pytorch/pytorch/issues/6900
    if mode == "bilinear":
        pad = int(math.ceil((factor - 1) / 2.))
        conv_param = dict(
            num_output=channels,
            kernel_size=2 * factor - factor % 2,
            stride=factor,
            pad=pad,
            group=channels,
            bias_term=False,
            weight_filler=dict(type="bilinear_upsampling"),
        )
    else:
        conv_param = dict(
            num_output=channels,
            kernel_size=factor,
            stride=factor,
            group=channels,
            bias_term=False,
        )
    layer = myf("Deconvolution", node.name, [bottom], [top],
                convolution_param=conv_param)
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_concat(node, graph, err):
    """Map an ONNX Concat node onto a caffe Concat layer.

    When concatenating along the channel axis (axis == 1) the output
    channel count is the sum of the inputs'; otherwise it is inherited
    from the first input.
    """
    bottoms = [str(i) for i in node.inputs]
    top = str(node.outputs[0])
    axis = node.attrs.get("axis", 1)
    layer = myf('Concat', node.name, bottoms, [top], axis=axis)
    if axis == 1:
        graph.channel_dims[top] = sum(graph.channel_dims[name] for name in bottoms)
    else:
        graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
    return layer
def _convert_conv_transpose(node, graph, err):
    """Map an ONNX ConvTranspose node onto a caffe Deconvolution layer.

    The output channel count is W.shape[1] (ONNX deconv weights are laid
    out [in_channels, out_channels/groups, kH, kW]).
    """
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    weight_name = node.inputs[1]
    if weight_name not in node.input_tensors:
        # Consistent with _convert_gemm: report and bail out instead of
        # crashing on W.shape below.
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        return
    W = node.input_tensors[weight_name]
    # A third input means the deconvolution carries a bias term.
    bias_flag = len(node.inputs) > 2
    groups = node.attrs.get("group", 1)
    kernel_shape = node.attrs["kernel_shape"]
    pads = node.attrs.get("pads", [0, 0, 0, 0])
    strides = node.attrs["strides"]
    # NOTE(review): the "dilations" attribute is not forwarded to caffe, so
    # dilated ConvTranspose nodes are converted without their dilation.
    layer = myf('Deconvolution', node_name, [input_name], [output_name],
                convolution_param=dict(
                    num_output=W.shape[1],
                    kernel_h=kernel_shape[0], kernel_w=kernel_shape[1],
                    stride_h=strides[0], stride_w=strides[1],
                    group=groups,
                    pad_h=pads[0], pad_w=pads[1],
                    bias_term=bias_flag,
                ))
    graph.channel_dims[output_name] = W.shape[1]
    return layer
# Dispatch table consumed by the graph converter: ONNX op_type string ->
# node-conversion function. Op types absent from this dict are unsupported.
_ONNX_NODE_REGISTRY = {
    "Conv": _convert_conv,
    "Relu": _convert_relu,
    "BatchNormalization": _convert_BatchNorm,
    "Add": _convert_Add,
    "Mul": _convert_Mul,
    "Reshape": _convert_Reshape,
    "MaxPool": _convert_pool,
    "AveragePool": _convert_pool,
    "GlobalAveragePool": _convert_pool,
    "Dropout": _convert_dropout,
    "Gemm": _convert_gemm,
    "Upsample": _convert_upsample,
    "Concat": _convert_concat,
    "ConvTranspose": _convert_conv_transpose,
    "Sigmoid": _convert_sigmoid,
    "Flatten": _convert_Flatten,
}
| 35.392523 | 120 | 0.616319 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from caffe import params as P
from MyCaffe import Function as myf
def _compare(a, b, encoding="utf8"):
if isinstance(a, bytes):
a = a.decode(encoding)
if isinstance(b, bytes):
b = b.decode(encoding)
return a == b
def make_input(input):
name = input[0]
output = input[0]
output = [output]
shape = input[2]
shape = list(shape)
input_layer = myf("Input", name, [], output, input_param=dict(shape=dict(dim=shape)))
return input_layer
def _convert_conv(node, graph, err):
weight_name = node.inputs[1]
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
node_name = node.name
W = None
if weight_name in node.input_tensors:
W = node.input_tensors[weight_name]
else:
err.missing_initializer(node,
"Weight tensor: {} not found in the graph initializer".format(weight_name, ))
is_deconv = False
if node.op_type.endswith("Transpose"):
is_deconv = True
bias_flag = False
bias = None
if len(node.inputs) > 2:
bias = node.input_tensors[node.inputs[2]]
bias_flag = True
dilations = node.attrs.get("dilations", [1, 1])
groups = node.attrs.get("group", 1)
kernel_shape = node.attrs["kernel_shape"]
pads = node.attrs.get("pads", [0, 0, 0, 0])
strides = node.attrs["strides"]
layer = myf("Convolution", node_name, [input_name], [output_name],
kernel_h=kernel_shape[0], kernel_w=kernel_shape[1],
stride_h=strides[0], stride_w=strides[1], group=groups,
pad_h=pads[0], pad_w=pads[1],
num_output=W.shape[0], dilation=dilations[0], bias_term=bias_flag)
graph.channel_dims[output_name] = W.shape[0]
return layer
def _convert_relu(node, graph, err):
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
name = str(node.name)
if input_name == output_name:
inplace = True
else:
inplace = False
layer = myf("ReLU", name, [input_name], [output_name], in_place=inplace)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_sigmoid(node, graph, err):
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
name = str(node.name)
if input_name == output_name:
inplace = True
else:
inplace = False
layer = myf("Sigmoid", name, [input_name], [output_name], in_place=inplace)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_BatchNorm(node, graph, err):
epsilon = node.attrs.get("epsilon", 1e-5)
scale = node.input_tensors[node.inputs[1]]
bias = node.input_tensors[node.inputs[2]]
mean = node.input_tensors[node.inputs[3]]
var = node.input_tensors[node.inputs[4]]
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
if input_name == output_name:
inplace = True
else:
inplace = False
bn_layer = myf("BatchNorm", node_name + "_bn", [input_name], [output_name], eps=epsilon, use_global_stats=True,
in_place=inplace)
scale_layer = myf("Scale", node_name, [output_name], [output_name], in_place=True, bias_term=True)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return bn_layer, scale_layer
def _convert_Add(node, graph, err):
input_name_list = [str(i) for i in node.inputs]
output_name = str(node.outputs[0])
node_name = node.name
max_dim = 0
for name in input_name_list:
if graph.channel_dims[name] > max_dim:
max_dim = graph.channel_dims[name]
if 'broadcast' in node.attrs:
if node.attrs['broadcast'] == 1:
input_node_number = len(input_name_list)
if input_node_number != 2:
return err.unsupported_op_configuration(node, "Broadcast Add must has 2 input, not {}".format(
input_node_number))
axis = node.attrs['axis']
flat_layer = myf("Flatten", node_name + '_flat', [input_name_list[1]], [output_name + '_flat'])
layer = myf("Bias", node_name, [input_name_list[0], output_name + '_flat'], [output_name], axis=axis)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return flat_layer, layer
layer = myf("Eltwise", node_name, input_name_list, [output_name], operation=P.Eltwise.SUM)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return layer
def _convert_Mul(node, graph, err):
    """Convert ONNX Mul into Caffe Eltwise PROD (or Flatten+Scale for broadcast)."""
    bottoms = [str(i) for i in node.inputs]
    top = str(node.outputs[0])
    node_name = node.name
    if node.attrs.get('broadcast') == 1:
        if len(bottoms) != 2:
            return err.unsupported_op_configuration(node, "Broadcast Mul must has 2 input, not {}".format(
                len(bottoms)))
        axis = node.attrs['axis']
        # Broadcast multiply maps to Scale; the second operand is flattened.
        flat_layer = myf("Flatten", node_name + '_flat', [bottoms[1]], [top + '_flat'])
        layer = myf("Scale", node_name, [bottoms[0], top + '_flat'], [top], bias_term=False,
                    axis=axis)
        graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
        return flat_layer, layer
    layer = myf("Eltwise", node_name, bottoms, [top], operation=P.Eltwise.PROD)
    graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
    return layer
def _convert_Reshape(node, graph, err):
    """Convert ONNX Reshape into a Caffe Flatten (2D target) or Reshape (4D).

    The target shape comes either from the 'shape' attribute (opset < 5) or
    from the second input tensor (opset >= 5).
    """
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    if len(node.inputs) == 1:
        shape = tuple(node.attrs.get('shape', ()))
    else:
        shape = tuple(node.input_tensors[node.inputs[1]])
    # Identical input/output names signal an in-place Caffe layer.
    inplace = input_name == output_name
    if len(shape) == 2:
        # A 2D target shape is expressed as Flatten in Caffe.
        layer = myf("Flatten", node_name, [input_name], [output_name], in_place=inplace)
        graph.channel_dims[output_name] = shape[1]
        return layer
    elif len(shape) == 4:
        graph.channel_dims[output_name] = shape[1]
        layer = myf("Reshape", node_name, [input_name], [output_name], reshape_param=dict(shape=dict(dim=list(shape))))
        return layer
    else:
        # Fixed typo in the original message ("dimention").
        return err.unsupported_op_configuration(node, "Reshape dimension number shall be 2 or 4")
def _convert_Flatten(node, graph, err):
    """Convert ONNX Flatten into a Caffe Flatten layer."""
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    # Identical input/output names signal an in-place Caffe layer.
    return myf("Flatten", node.name, [bottom], [top], in_place=bottom == top)
def _convert_pool(node, graph, err):
    """Convert ONNX (Global)MaxPool/AveragePool into a Caffe Pooling layer."""
    node_name = node.name
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    if node.op_type.endswith("MaxPool"):
        pool_type = P.Pooling.MAX
    elif node.op_type.endswith("AveragePool"):
        pool_type = P.Pooling.AVE
    else:
        return err.unsupported_op_configuration(node, "Unsupported pool type")
    # 1 for GlobalMaxPool / GlobalAveragePool, 0 otherwise.
    is_global = int(node.op_type.startswith("Global"))
    if is_global:
        # Global pooling needs no kernel/stride/pad parameters.
        pooling_param = dict(pool=pool_type,
                             global_pooling=is_global)
    else:
        kernel_shape = node.attrs["kernel_shape"]
        strides = node.attrs.get('strides', [1, 1])
        pads = node.attrs.get('pads', [0, 0, 0, 0])
        pooling_param = dict(pool=pool_type,
                             kernel_h=kernel_shape[0],
                             kernel_w=kernel_shape[1],
                             stride_h=strides[0],
                             stride_w=strides[1],
                             pad_h=pads[0],
                             pad_w=pads[1],
                             global_pooling=is_global)
    layer = myf("Pooling", node_name, [bottom], [top], pooling_param=pooling_param)
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_dropout(node, graph, err):
    """Convert ONNX Dropout into a Caffe Dropout layer."""
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    # ONNX default dropout ratio is 0.5.
    layer = myf("Dropout", node.name, [bottom], [top],
                dropout_ratio=node.attrs.get('ratio', 0.5))
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_gemm(node, graph, err):
    """Convert ONNX Gemm into a Caffe InnerProduct layer.

    Only the fully-connected form ``y = x @ W.T + b`` (``transB == 1``) with a
    2D weight and 1D bias is supported, matching InnerProduct semantics.
    """
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    weight_name = node.inputs[1]
    if weight_name in node.input_tensors:
        W = node.input_tensors[weight_name]
    else:
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        return
    # 'transB' defaults to 0 in the ONNX spec; the original indexed the
    # attribute directly and raised KeyError whenever it was absent.
    if ("broadcast" in node.attrs and node.attrs["broadcast"] != 1) or node.attrs.get("transB", 0) != 1:
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
    b = None
    bias_flag = False
    if len(node.inputs) > 2:
        b = node.input_tensors[node.inputs[2]]
    # InnerProduct needs a 2D weight and, when present, a 1D bias.
    if len(W.shape) != 2 or (b is not None and len(b.shape) != 1):
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
    if b is not None:
        bias_flag = True
        if W.shape[0] != b.shape[0]:
            return err.unsupported_op_configuration(node,
                                                    "Gemm is supported only for inner_product layer")
    layer = myf("InnerProduct", node_name, [input_name], [output_name], num_output=W.shape[0], bias_term=bias_flag)
    graph.channel_dims[output_name] = W.shape[0]
    return layer
def _convert_upsample(node, graph, err):
    """Convert ONNX Upsample into a Caffe Deconvolution layer.

    'bilinear' mode uses a fixed bilinear filler; any other mode becomes a
    plain grouped deconvolution (nearest-neighbour style).
    """
    factor = int(node.attrs["height_scale"])
    bottom = str(node.inputs[0])
    top = str(node.outputs[0])
    channels = graph.channel_dims[bottom]
    mode = node.attrs["mode"]
    if mode == "bilinear":
        # Standard bilinear-upsampling deconvolution parameters.
        pad = int(math.ceil((factor - 1) / 2.))
        conv_param = dict(
            num_output=channels,
            kernel_size=2 * factor - factor % 2,
            stride=factor,
            pad=pad,
            group=channels,
            bias_term=False,
            weight_filler=dict(type="bilinear_upsampling")
        )
    else:
        conv_param = dict(
            num_output=channels,
            kernel_size=factor,
            stride=factor,
            group=channels,
            bias_term=False,
        )
    layer = myf("Deconvolution", node.name, [bottom], [top], convolution_param=conv_param)
    graph.channel_dims[top] = graph.channel_dims[bottom]
    return layer
def _convert_concat(node, graph, err):
    """Convert ONNX Concat into a Caffe Concat layer and track channel dims."""
    bottoms = [str(i) for i in node.inputs]
    top = str(node.outputs[0])
    axis = node.attrs.get("axis", 1)
    layer = myf('Concat', node.name, bottoms, [top], axis=axis)
    if axis == 1:
        # Concatenation along channels: output channels are the inputs' sum.
        graph.channel_dims[top] = sum(graph.channel_dims[name] for name in bottoms)
    else:
        graph.channel_dims[top] = graph.channel_dims[bottoms[0]]
    return layer
def _convert_conv_transpose(node, graph, err):
    """Convert ONNX ConvTranspose into a Caffe Deconvolution layer."""
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    weight_name = node.inputs[1]
    if weight_name in node.input_tensors:
        W = node.input_tensors[weight_name]
    else:
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        # Bug fix: the original fell through and crashed on W.shape below.
        return
    bias_flag = False
    if len(node.inputs) > 2:
        # Third input means a bias initializer is expected; the lookup also
        # validates its presence in the initializer map.
        bias = node.input_tensors[node.inputs[2]]
        bias_flag = True
    groups = node.attrs.get("group", 1)
    kernel_shape = node.attrs["kernel_shape"]
    pads = node.attrs.get("pads", [0, 0, 0, 0])
    strides = node.attrs["strides"]
    # NOTE: the ONNX 'dilations' attribute is not mapped to the Caffe layer
    # (the original read it and silently ignored it).
    layer = myf('Deconvolution', node_name, [input_name], [output_name],
                convolution_param=dict(
                    num_output=W.shape[1],
                    kernel_h=kernel_shape[0], kernel_w=kernel_shape[1],
                    stride_h=strides[0], stride_w=strides[1],
                    group=groups,
                    pad_h=pads[0], pad_w=pads[1],
                    bias_term=bias_flag,
                ))
    graph.channel_dims[output_name] = W.shape[1]
    return layer
# Dispatch table mapping an ONNX op_type to its converter function.
# Conversion code looks up node.op_type here; unlisted ops are unsupported.
_ONNX_NODE_REGISTRY = {
    "Conv": _convert_conv,
    "Relu": _convert_relu,
    "BatchNormalization": _convert_BatchNorm,
    "Add": _convert_Add,
    "Mul": _convert_Mul,
    "Reshape": _convert_Reshape,
    "MaxPool": _convert_pool,
    "AveragePool": _convert_pool,
    "GlobalAveragePool": _convert_pool,
    "Dropout": _convert_dropout,
    "Gemm": _convert_gemm,
    "Upsample": _convert_upsample,
    "Concat": _convert_concat,
    "ConvTranspose": _convert_conv_transpose,
    "Sigmoid": _convert_sigmoid,
    "Flatten": _convert_Flatten,
}
| true | true |
1c471581b148d8da944b7d385ff56958dcd3c839 | 80 | py | Python | satori.core/satori/core/sec/__init__.py | Cloud11665/satori-git | ea1855a920c98b480423bf247bce6e5626985c4a | [
"MIT"
] | 4 | 2021-01-05T01:35:36.000Z | 2021-12-13T00:05:14.000Z | satori.core/satori/core/sec/__init__.py | Cloud11665/satori-git | ea1855a920c98b480423bf247bce6e5626985c4a | [
"MIT"
] | 2 | 2020-06-06T01:12:07.000Z | 2020-06-06T01:16:01.000Z | satori.core/satori/core/sec/__init__.py | Cloud11665/satori-git | ea1855a920c98b480423bf247bce6e5626985c4a | [
"MIT"
] | 2 | 2021-01-05T01:33:30.000Z | 2021-03-06T13:48:21.000Z | # vim:ts=4:sts=4:sw=4:expandtab
"""
Security and authorization procedures.
"""
| 13.333333 | 38 | 0.7 | true | true | |
1c47178b6382cec7eefc639b882b988952869b5a | 717 | py | Python | setup.py | andrey-avdeev/telemetry | 0c70b410079616634ff1895b360d8d9b8a65f046 | [
"Apache-2.0"
] | 13 | 2019-12-01T08:05:25.000Z | 2020-05-19T10:43:49.000Z | setup.py | andrey-avdeev/telemetry | 0c70b410079616634ff1895b360d8d9b8a65f046 | [
"Apache-2.0"
] | 6 | 2019-12-02T07:43:49.000Z | 2019-12-02T07:52:15.000Z | setup.py | andrey-avdeev/telemetry | 0c70b410079616634ff1895b360d8d9b8a65f046 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import io
from setuptools import setup
# Use the README as the long description shown on PyPI.
with io.open("README.md", "r", encoding="utf-8") as f:
    readme = f.read()
setup(
    name="telemetry",
    version="0.0.1",
    description="Profiling in production",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="Andrey Avdeev",
    author_email="seorazer@gmail.com",
    license="Apache 2.0",
    packages=["telemetry"],
    zip_safe=False,
    python_requires=">=3.7",
    install_requires=["loguru>=0.3.2", "statsd>=3.3.0"],
    keywords="statsd telemetry",
    url="https://github.com/andrey-avdeev/telemetry",
    download_url='https://github.com/andrey-avdeev/telemetry/archive/v_0.0.1.tar.gz'
)
| 27.576923 | 84 | 0.666667 |
import io
from setuptools import setup
with io.open("README.md", "r", encoding="utf-8") as f:
readme = f.read()
setup(
name="telemetry",
version="0.0.1",
description="Profiling in production",
long_description=readme,
long_description_content_type="text/markdown",
author="Andrey Avdeev",
author_email="seorazer@gmail.com",
license="Apache 2.0",
packages=["telemetry"],
zip_safe=False,
python_requires=">=3.7",
install_requires=["loguru>=0.3.2", "statsd>=3.3.0"],
keywords="statsd telemetry",
url="https://github.com/andrey-avdeev/telemetry",
download_url='https://github.com/andrey-avdeev/telemetry/archive/v_0.0.1.tar.gz'
)
| true | true |
1c4717eb7bcd8e66085457e0e315a1117fae6d1b | 487 | py | Python | students/K33421/practical_works/Dzhapua_Esnat/django_project_dzhapua/project_first_app/migrations/0005_auto_20210112_1735.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/practical_works/Dzhapua_Esnat/django_project_dzhapua/project_first_app/migrations/0005_auto_20210112_1735.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/practical_works/Dzhapua_Esnat/django_project_dzhapua/project_first_app/migrations/0005_auto_20210112_1735.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2021-01-12 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: restrict License.type to four vehicle categories."""
    dependencies = [
        ('project_first_app', '0004_auto_20210112_1725'),
    ]
    operations = [
        migrations.AlterField(
            model_name='license',
            name='type',
            # NOTE(review): default 'Choose' is longer than max_length=2, so an
            # unset value would fail length validation — confirm intent.
            field=models.CharField(choices=[('D', 'Bus'), ('C', 'Truck'), ('B', 'Car'), ('A', 'Motorcycle')], default='Choose', max_length=2),
        ),
    ]
| 25.631579 | 142 | 0.583162 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0004_auto_20210112_1725'),
]
operations = [
migrations.AlterField(
model_name='license',
name='type',
field=models.CharField(choices=[('D', 'Bus'), ('C', 'Truck'), ('B', 'Car'), ('A', 'Motorcycle')], default='Choose', max_length=2),
),
]
| true | true |
1c47192561935cacdc804480f0ef012db41123f8 | 645 | py | Python | Graphy/tests/utils/test_files.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
] | 4 | 2021-03-06T13:50:58.000Z | 2022-03-28T15:17:07.000Z | Graphy/tests/utils/test_files.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
] | null | null | null | Graphy/tests/utils/test_files.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
] | null | null | null | """
Author: André Bento
Date last modified: 26-02-2019
"""
import os
from unittest import TestCase
from graphy.utils import files as my_files
class TestFiles(TestCase):
    """Unit tests for the graphy.utils.files helpers."""
    def setUp(self) -> None:
        super().setUp()
        # Path of this very test module: a file guaranteed to exist on disk.
        self.__file_path = os.path.realpath(__file__)
    def test_get_absolute_path(self) -> None:
        """ Tests get_absolute_path function. """
        # A non-existent path must raise rather than return a bogus value.
        with self.assertRaises(FileNotFoundError):
            my_files.get_absolute_path('not/found/file.txt')
    def test_read_file(self) -> None:
        """ Tests read_file function. """
        self.assertIsNotNone(my_files.read_file(self.__file_path))
| 25.8 | 66 | 0.671318 | import os
from unittest import TestCase
from graphy.utils import files as my_files
class TestFiles(TestCase):
def setUp(self) -> None:
super().setUp()
self.__file_path = os.path.realpath(__file__)
def test_get_absolute_path(self) -> None:
with self.assertRaises(FileNotFoundError):
my_files.get_absolute_path('not/found/file.txt')
def test_read_file(self) -> None:
self.assertIsNotNone(my_files.read_file(self.__file_path))
| true | true |
1c471a5a4d3049b0f68e2c3cdd6645cca95fe30f | 14,320 | py | Python | transitions/extensions/nesting.py | timokoola/timoechobot | c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55 | [
"Apache-2.0"
] | null | null | null | transitions/extensions/nesting.py | timokoola/timoechobot | c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55 | [
"Apache-2.0"
] | null | null | null | transitions/extensions/nesting.py | timokoola/timoechobot | c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55 | [
"Apache-2.0"
] | null | null | null | from ..core import Machine, Transition, State, Event, listify, MachineError, EventData
from six import string_types
import copy
from functools import partial
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class FunctionWrapper(object):
    """Expose a callable behind a chain of attribute lookups.

    ``FunctionWrapper(f, ['a', 'b'])`` yields an object ``w`` such that
    ``w.a.b(...)`` invokes ``f``; an empty path makes ``w`` itself callable.
    Used to build dotted trigger names like ``to_A.B.C``.
    """

    def __init__(self, func, path):
        if path:
            self.add(func, path)
            self._func = None
        else:
            self._func = func

    def add(self, func, path):
        """Attach *func* below this node along the attribute names in *path*."""
        attr = path[0]
        # Attribute names must not start with a digit; prefix those with 's'.
        if attr[0].isdigit():
            attr = 's' + attr
        if hasattr(self, attr):
            # Descend into the existing branch.
            getattr(self, attr).add(func, path[1:])
        else:
            setattr(self, attr, FunctionWrapper(func, path[1:]))

    def __call__(self, *args, **kwargs):
        return self._func(*args, **kwargs)
# Added parent and children parameter children is a list of NestedStates
# and parent is the full name of the parent e.g. Foo_Bar_Baz.
class NestedState(State):
    """A state that may contain child states, forming a tree.

    The fully qualified name of a nested state is the separator-joined chain
    of its ancestors' names, e.g. ``Foo_Bar_Baz`` with the default separator.
    """

    # Character used to join parent and child names into a qualified name.
    separator = '_'

    def __init__(self, name, on_enter=None, on_exit=None, ignore_invalid_triggers=None, parent=None, initial=None):
        """
        Args:
            name (str): Base name of the state, without any parent prefix.
            on_enter: Enter callback(s), forwarded to ``State``.
            on_exit: Exit callback(s), forwarded to ``State``.
            ignore_invalid_triggers (bool): Whether invalid triggers should be
                ignored instead of raising.
            parent (NestedState): Parent state; None for a root state.
            initial (str): Base name of the child entered by default, if any.
        """
        self._name = name
        self._initial = initial
        self.parent = parent
        super(NestedState, self).__init__(name=name, on_enter=on_enter, on_exit=on_exit,
                                          ignore_invalid_triggers=ignore_invalid_triggers)
        # Register with the parent first; our own child list is created last.
        if self.parent:
            self.parent.children.append(self)
        self.children = []

    @property
    def initial(self):
        # Qualified name of the default child state, or None when absent.
        return self.name + NestedState.separator + self._initial if self._initial else None

    @property
    def level(self):
        # Depth in the hierarchy; root states have level 0.
        return self.parent.level + 1 if self.parent is not None else 0

    @property
    def name(self):
        # Fully qualified name including all ancestor names.
        return (self.parent.name + NestedState.separator + self._name) if self.parent else self._name

    @name.setter
    def name(self, value):
        self._name = value

    def exit_nested(self, event_data, target_state):
        """Exit states up to the common ancestor with *target_state*.

        Returns the hierarchy level at which entering the target must begin.
        """
        if self.level > target_state.level:
            # We are deeper than the target: exit and delegate to the parent.
            self.exit(event_data)
            return self.parent.exit_nested(event_data, target_state)
        elif self.level <= target_state.level:
            # Align a cursor on the target's ancestry with our own level.
            tmp_state = target_state
            while self.level != tmp_state.level:
                tmp_state = tmp_state.parent
            tmp_self = self
            # Walk both chains upwards until the parents match, exiting on the way.
            while tmp_self.level > 0 and tmp_state.parent.name != tmp_self.parent.name:
                tmp_self.exit(event_data)
                tmp_self = tmp_self.parent
                tmp_state = tmp_state.parent
            if tmp_self != tmp_state:
                # Sibling branches: the last diverging state is exited as well.
                tmp_self.exit(event_data)
                return tmp_self.level
            else:
                # Same state reached: re-entry starts one level below it.
                return tmp_self.level + 1

    def enter_nested(self, event_data, level=None):
        """Enter all states from *level* down to this state, parents first."""
        if level is not None and level <= self.level:
            if level != self.level:
                self.parent.enter_nested(event_data, level)
            self.enter(event_data)
class NestedTransition(Transition):
    """Transition that resolves nested initial states and walks state chains."""

    def execute(self, event_data):
        # Drill down to the deepest declared initial substate of the target.
        target = event_data.machine.get_state(self.dest)
        while target.initial:
            target = event_data.machine.get_state(target.initial)
        self.dest = target.name
        return super(NestedTransition, self).execute(event_data)

    # Overrides the state-change step that Transition exposes for subclasses.
    def _change_state(self, event_data):
        machine = event_data.machine
        model = event_data.model
        destination = machine.get_state(self.dest)
        origin = machine.get_state(model.state)
        # Leave states up to the common ancestor, then enter down to the target.
        reentry_level = origin.exit_nested(event_data, destination)
        machine.set_state(self.dest, model)
        event_data.update(model)
        destination.enter_nested(event_data, reentry_level)
class NestedEvent(Event):
    """Event that resolves triggers against a hierarchy of nested states.

    The event may be defined on an ancestor of the model's current state, so
    the lookup walks up the parent chain until a matching source is found.
    """

    def _trigger(self, model, *args, **kwargs):
        """Run the first valid transition for *model*.

        Returns:
            bool: True if a transition executed, False otherwise (including
            ignored invalid triggers).

        Raises:
            MachineError: If the trigger is invalid and the current state does
                not ignore invalid triggers.
        """
        tmp = self.machine.get_state(model.state)
        # Climb towards the root until a state owning this event is found.
        while tmp.parent and tmp.name not in self.transitions:
            tmp = tmp.parent
        if tmp.name not in self.transitions:
            msg = "%sCan't trigger event %s from state %s!" % (self.machine.id, self.name,
                                                               model.state)
            if self.machine.get_state(model.state).ignore_invalid_triggers:
                logger.warning(msg)
                # Bug fix: the original fell through after the warning and
                # raised KeyError on self.transitions[tmp.name] below.
                return False
            raise MachineError(msg)
        event = EventData(self.machine.get_state(model.state), self, self.machine,
                          model, args=args, kwargs=kwargs)
        for t in self.transitions[tmp.name]:
            event.transition = t
            if t.execute(event):
                return True
        return False
class HierarchicalMachine(Machine):
    """Machine variant that supports hierarchically nested states.

    States may be declared as plain names, dicts (with ``children``,
    ``initial`` and ``remap`` entries), ``NestedState`` instances, or whole
    ``HierarchicalMachine`` instances embedded as sub-hierarchies.
    """

    def __init__(self, *args, **kwargs):
        # Transitions collected while embedding other machines in traverse();
        # they are replayed by add_states once all states are registered.
        self._buffered_transitions = []
        super(HierarchicalMachine, self).__init__(*args, **kwargs)
        for model in self.models:
            if hasattr(model, 'to'):
                # Fix: logger.warn is a deprecated alias of logger.warning.
                logger.warning("%sModel already has a 'to'-method. It will NOT be overwritten by NestedMachine", self.id)
            else:
                to_func = partial(self.to, model)
                setattr(model, 'to', to_func)

    # Factory methods let subclasses substitute their own nested variants.
    @staticmethod
    def _create_transition(*args, **kwargs):
        return NestedTransition(*args, **kwargs)

    @staticmethod
    def _create_event(*args, **kwargs):
        return NestedEvent(*args, **kwargs)

    @staticmethod
    def _create_state(*args, **kwargs):
        return NestedState(*args, **kwargs)

    def is_state(self, state_name, model, allow_substates=False):
        """Return True if *model* is in *state_name*.

        With ``allow_substates=True``, being in any descendant of
        *state_name* counts as well.
        """
        if not allow_substates:
            return model.state == state_name
        temp_state = self.get_state(model.state)
        while not temp_state.name == state_name and temp_state.level > 0:
            temp_state = temp_state.parent
        return temp_state.name == state_name

    def traverse(self, states, on_enter=None, on_exit=None,
                 ignore_invalid_triggers=None, parent=None, remap=None):
        """Flatten a (possibly nested) state configuration into NestedStates.

        Args:
            states: State configuration(s); see the class docstring for the
                accepted formats.
            on_enter: Default enter callback(s) for created states.
            on_exit: Default exit callback(s) for created states.
            ignore_invalid_triggers: Default for created states; falls back to
                the machine-wide setting when None.
            parent (NestedState): Parent of states created at this level.
            remap (dict): Maps state names of an embedded machine onto states
                of this machine; remapped states are not copied.

        Returns:
            list: The created NestedState objects.

        Raises:
            ValueError: For unknown state representations or duplicate names.
        """
        # Fix: the original used a mutable default argument (remap={}).
        remap = {} if remap is None else remap
        states = listify(states)
        new_states = []
        ignore = ignore_invalid_triggers
        if ignore is None:
            ignore = self.ignore_invalid_triggers
        for state in states:
            tmp_states = []
            # Other state representations are handled almost like in the base
            # class, but a parent parameter is added.
            if isinstance(state, string_types):
                if state in remap:
                    continue
                tmp_states.append(self._create_state(state, on_enter=on_enter, on_exit=on_exit, parent=parent,
                                                     ignore_invalid_triggers=ignore))
            elif isinstance(state, dict):
                if state['name'] in remap:
                    continue
                state = copy.deepcopy(state)
                if 'ignore_invalid_triggers' not in state:
                    state['ignore_invalid_triggers'] = ignore
                state['parent'] = parent
                if 'children' in state:
                    # Concat the state names with the current scope. The scope
                    # is the concatenation of all previous parents. Call
                    # traverse again to check for more nested states.
                    p = self._create_state(state['name'], on_enter=on_enter, on_exit=on_exit,
                                           ignore_invalid_triggers=ignore, parent=parent,
                                           initial=state.get('initial', None))
                    nested = self.traverse(state['children'], on_enter=on_enter, on_exit=on_exit,
                                           ignore_invalid_triggers=ignore,
                                           parent=p, remap=state.get('remap', {}))
                    tmp_states.append(p)
                    tmp_states.extend(nested)
                else:
                    tmp_states.insert(0, self._create_state(**state))
            elif isinstance(state, HierarchicalMachine):
                # Copy only states not mentioned in remap.
                copied_states = [s for s in state.states.values() if s.name not in remap]
                # inner_states are the root states of the passed machine
                # which have to be attached to the parent.
                inner_states = [s for s in copied_states if s.level == 0]
                for s in inner_states:
                    s.parent = parent
                tmp_states.extend(copied_states)
                for trigger, event in state.events.items():
                    if trigger.startswith('to_'):
                        path = trigger[3:].split(NestedState.separator)
                        # Do not copy auto_transitions since they would not be
                        # valid anymore; trigger and destination do not exist
                        # in the new environment.
                        if path[0] in remap:
                            continue
                        ppath = parent.name.split(NestedState.separator)
                        path = ['to_' + ppath[0]] + ppath[1:] + path
                        trigger = '.'.join(path)
                    # Adjust all transition start and end points to new state names.
                    for transitions in event.transitions.values():
                        for transition in transitions:
                            src = transition.source
                            # Transitions from remapped states are filtered to
                            # prevent unexpected behaviour in the parent machine.
                            if src in remap:
                                continue
                            dst = parent.name + NestedState.separator + transition.dest\
                                if transition.dest not in remap else remap[transition.dest]
                            conditions = []
                            unless = []
                            for c in transition.conditions:
                                conditions.append(c.func) if c.target else unless.append(c.func)
                            self._buffered_transitions.append({'trigger': trigger,
                                                               'source': parent.name + NestedState.separator + src,
                                                               'dest': dst,
                                                               'conditions': conditions,
                                                               'unless': unless,
                                                               'prepare': transition.prepare,
                                                               'before': transition.before,
                                                               'after': transition.after})
            elif isinstance(state, NestedState):
                tmp_states.append(state)
            else:
                raise ValueError("%s cannot be added to the machine since its type is not known." % state)
            new_states.extend(tmp_states)
        duplicate_check = []
        for s in new_states:
            if s.name in duplicate_check:
                state_names = [s.name for s in new_states]
                raise ValueError("State %s cannot be added since it is already in state list %s." % (s.name, state_names))
            else:
                duplicate_check.append(s.name)
        return new_states

    def add_states(self, states, *args, **kwargs):
        """Add (possibly nested) states, then replay buffered transitions."""
        # Preprocess states to flatten the configuration and resolve nesting.
        new_states = self.traverse(states, *args, **kwargs)
        super(HierarchicalMachine, self).add_states(new_states, *args, **kwargs)
        while len(self._buffered_transitions) > 0:
            # Renamed from 'args' to avoid shadowing the method parameter.
            buffered = self._buffered_transitions.pop()
            self.add_transition(**buffered)

    def get_triggers(self, *args):
        """Return triggers valid for the given states, including triggers
        defined on their ancestor states."""
        states = []
        for state in args:
            s = self.get_state(state)
            while s.parent:
                states.append(s.parent.name)
                s = s.parent
        states.extend(args)
        return super(HierarchicalMachine, self).get_triggers(*states)

    def add_transition(self, trigger, source, dest, conditions=None,
                       unless=None, before=None, after=None, prepare=None, **kwargs):
        """Add a transition; with a custom separator, 'to_<state>' triggers are
        exposed on the models as nested FunctionWrapper attribute chains."""
        if isinstance(source, string_types):
            source = [x.name for x in self.states.values()] if source == '*' else [source]
        # FunctionWrappers are only necessary if a custom separator is used.
        if trigger not in self.events and NestedState.separator not in '_':
            self.events[trigger] = self._create_event(trigger, self)
            if trigger.startswith('to_'):
                path = trigger[3:].split(NestedState.separator)
                for model in self.models:
                    trig_func = partial(self.events[trigger].trigger, model=model)
                    if hasattr(model, 'to_' + path[0]):
                        t = getattr(model, 'to_' + path[0])
                        t.add(trig_func, path[1:])
                    else:
                        t = FunctionWrapper(trig_func, path[1:])
                        setattr(model, 'to_' + path[0], t)
            else:
                for model in self.models:
                    trig_func = partial(self.events[trigger].trigger, model=model)
                    setattr(model, trigger, trig_func)
        super(HierarchicalMachine, self).add_transition(trigger, source, dest, conditions=conditions, unless=unless,
                                                        prepare=prepare, before=before, after=after, **kwargs)

    def on_enter(self, state_name, callback):
        """Register *callback* to run whenever *state_name* is entered."""
        self.get_state(state_name).add_callback('enter', callback)

    def on_exit(self, state_name, callback):
        """Register *callback* to run whenever *state_name* is exited."""
        self.get_state(state_name).add_callback('exit', callback)

    def to(self, model, state_name, *args, **kwargs):
        """Move *model* directly to *state_name* (attached to models as 'to')."""
        event = EventData(self.get_state(model.state), None, self,
                          model, args=args, kwargs=kwargs)
        self._create_transition(model.state, state_name).execute(event)
| 44.890282 | 122 | 0.5625 | from ..core import Machine, Transition, State, Event, listify, MachineError, EventData
from six import string_types
import copy
from functools import partial
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class FunctionWrapper(object):
def __init__(self, func, path):
if len(path) > 0:
self.add(func, path)
self._func = None
else:
self._func = func
def add(self, func, path):
name = path[0]
if name[0].isdigit():
name = 's' + name
if hasattr(self, name):
getattr(self, name).add(func, path[1:])
else:
x = FunctionWrapper(func, path[1:])
setattr(self, name, x)
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
class NestedState(State):
separator = '_'
def __init__(self, name, on_enter=None, on_exit=None, ignore_invalid_triggers=None, parent=None, initial=None):
self._name = name
self._initial = initial
self.parent = parent
super(NestedState, self).__init__(name=name, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore_invalid_triggers)
if self.parent:
self.parent.children.append(self)
self.children = []
@property
def initial(self):
return self.name + NestedState.separator + self._initial if self._initial else None
@property
def level(self):
return self.parent.level + 1 if self.parent is not None else 0
@property
def name(self):
return (self.parent.name + NestedState.separator + self._name) if self.parent else self._name
@name.setter
def name(self, value):
self._name = value
def exit_nested(self, event_data, target_state):
if self.level > target_state.level:
self.exit(event_data)
return self.parent.exit_nested(event_data, target_state)
elif self.level <= target_state.level:
tmp_state = target_state
while self.level != tmp_state.level:
tmp_state = tmp_state.parent
tmp_self = self
while tmp_self.level > 0 and tmp_state.parent.name != tmp_self.parent.name:
tmp_self.exit(event_data)
tmp_self = tmp_self.parent
tmp_state = tmp_state.parent
if tmp_self != tmp_state:
tmp_self.exit(event_data)
return tmp_self.level
else:
return tmp_self.level + 1
def enter_nested(self, event_data, level=None):
if level is not None and level <= self.level:
if level != self.level:
self.parent.enter_nested(event_data, level)
self.enter(event_data)
class NestedTransition(Transition):
def execute(self, event_data):
dest_state = event_data.machine.get_state(self.dest)
while dest_state.initial:
dest_state = event_data.machine.get_state(dest_state.initial)
self.dest = dest_state.name
return super(NestedTransition, self).execute(event_data)
def _change_state(self, event_data):
machine = event_data.machine
model = event_data.model
dest_state = machine.get_state(self.dest)
source_state = machine.get_state(model.state)
lvl = source_state.exit_nested(event_data, dest_state)
event_data.machine.set_state(self.dest, model)
event_data.update(model)
dest_state.enter_nested(event_data, lvl)
class NestedEvent(Event):
def _trigger(self, model, *args, **kwargs):
tmp = self.machine.get_state(model.state)
while tmp.parent and tmp.name not in self.transitions:
tmp = tmp.parent
if tmp.name not in self.transitions:
msg = "%sCan't trigger event %s from state %s!" % (self.machine.id, self.name,
model.state)
if self.machine.get_state(model.state).ignore_invalid_triggers:
logger.warning(msg)
else:
raise MachineError(msg)
event = EventData(self.machine.get_state(model.state), self, self.machine,
model, args=args, kwargs=kwargs)
for t in self.transitions[tmp.name]:
event.transition = t
if t.execute(event):
return True
return False
class HierarchicalMachine(Machine):
def __init__(self, *args, **kwargs):
self._buffered_transitions = []
super(HierarchicalMachine, self).__init__(*args, **kwargs)
for model in self.models:
if hasattr(model, 'to'):
logger.warn("%sModel already has a 'to'-method. It will NOT be overwritten by NestedMachine", self.id)
else:
to_func = partial(self.to, model)
setattr(model, 'to', to_func)
# Instead of creating transitions directly, Machine now use a factory method which can be overridden
@staticmethod
def _create_transition(*args, **kwargs):
return NestedTransition(*args, **kwargs)
@staticmethod
def _create_event(*args, **kwargs):
return NestedEvent(*args, **kwargs)
@staticmethod
def _create_state(*args, **kwargs):
return NestedState(*args, **kwargs)
def is_state(self, state_name, model, allow_substates=False):
if not allow_substates:
return model.state == state_name
temp_state = self.get_state(model.state)
while not temp_state.name == state_name and temp_state.level > 0:
temp_state = temp_state.parent
return temp_state.name == state_name
def traverse(self, states, on_enter=None, on_exit=None,
ignore_invalid_triggers=None, parent=None, remap={}):
states = listify(states)
new_states = []
ignore = ignore_invalid_triggers
if ignore is None:
ignore = self.ignore_invalid_triggers
for state in states:
tmp_states = []
# other state representations are handled almost like in the base class but a parent parameter is added
if isinstance(state, string_types):
if state in remap:
continue
tmp_states.append(self._create_state(state, on_enter=on_enter, on_exit=on_exit, parent=parent,
ignore_invalid_triggers=ignore))
elif isinstance(state, dict):
if state['name'] in remap:
continue
state = copy.deepcopy(state)
if 'ignore_invalid_triggers' not in state:
state['ignore_invalid_triggers'] = ignore
state['parent'] = parent
if 'children' in state:
# Concat the state names with the current scope. The scope is the concatenation of all
# previous parents. Call traverse again to check for more nested states.
p = self._create_state(state['name'], on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore, parent=parent,
initial=state.get('initial', None))
nested = self.traverse(state['children'], on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore,
parent=p, remap=state.get('remap', {}))
tmp_states.append(p)
tmp_states.extend(nested)
else:
tmp_states.insert(0, self._create_state(**state))
elif isinstance(state, HierarchicalMachine):
# copy only states not mentioned in remap
copied_states = [s for s in state.states.values() if s.name not in remap]
# inner_states are the root states of the passed machine
# which have be attached to the parent
inner_states = [s for s in copied_states if s.level == 0]
for s in inner_states:
s.parent = parent
tmp_states.extend(copied_states)
for trigger, event in state.events.items():
if trigger.startswith('to_'):
path = trigger[3:].split(NestedState.separator)
# do not copy auto_transitions since they would not be valid anymore;
# trigger and destination do not exist in the new environment
if path[0] in remap:
continue
ppath = parent.name.split(NestedState.separator)
path = ['to_' + ppath[0]] + ppath[1:] + path
trigger = '.'.join(path)
# adjust all transition start and end points to new state names
for transitions in event.transitions.values():
for transition in transitions:
src = transition.source
# transitions from remapped states will be filtered to prevent
# unexpected behaviour in the parent machine
if src in remap:
continue
dst = parent.name + NestedState.separator + transition.dest\
if transition.dest not in remap else remap[transition.dest]
conditions = []
unless = []
for c in transition.conditions:
conditions.append(c.func) if c.target else unless.append(c.func)
self._buffered_transitions.append({'trigger': trigger,
'source': parent.name + NestedState.separator + src,
'dest': dst,
'conditions': conditions,
'unless': unless,
'prepare': transition.prepare,
'before': transition.before,
'after': transition.after})
elif isinstance(state, NestedState):
tmp_states.append(state)
else:
raise ValueError("%s cannot be added to the machine since its type is not known." % state)
new_states.extend(tmp_states)
duplicate_check = []
for s in new_states:
if s.name in duplicate_check:
state_names = [s.name for s in new_states]
raise ValueError("State %s cannot be added since it is already in state list %s." % (s.name, state_names))
else:
duplicate_check.append(s.name)
return new_states
def add_states(self, states, *args, **kwargs):
# preprocess states to flatten the configuration and resolve nesting
new_states = self.traverse(states, *args, **kwargs)
super(HierarchicalMachine, self).add_states(new_states, *args, **kwargs)
# for t in self._buffered_transitions:
# print(t['trigger'])
while len(self._buffered_transitions) > 0:
args = self._buffered_transitions.pop()
self.add_transition(**args)
def get_triggers(self, *args):
# add parents to state set
states = []
for state in args:
s = self.get_state(state)
while s.parent:
states.append(s.parent.name)
s = s.parent
states.extend(args)
return super(HierarchicalMachine, self).get_triggers(*states)
    def add_transition(self, trigger, source, dest, conditions=None,
                       unless=None, before=None, after=None, prepare=None, **kwargs):
        """Register a transition; ``'*'`` as source expands to every known state.

        When a custom ``NestedState.separator`` is configured, auto-transition
        triggers (``to_<state>``) are exposed on each model through
        FunctionWrapper objects so dotted calls like ``model.to_A.B.C()`` work.
        """
        if isinstance(source, string_types):
            source = [x.name for x in self.states.values()] if source == '*' else [source]
        # FunctionWrappers are only necessary if a custom separator is used
        # (i.e. NestedState.separator is not the default underscore).
        if trigger not in self.events and NestedState.separator not in '_':
            self.events[trigger] = self._create_event(trigger, self)
            if trigger.startswith('to_'):
                # build/extend the wrapper tree rooted at 'to_<first path part>'
                path = trigger[3:].split(NestedState.separator)
                for model in self.models:
                    trig_func = partial(self.events[trigger].trigger, model=model)
                    if hasattr(model, 'to_' + path[0]):
                        # wrapper already exists: attach this trigger deeper in the tree
                        t = getattr(model, 'to_' + path[0])
                        t.add(trig_func, path[1:])
                    else:
                        t = FunctionWrapper(trig_func, path[1:])
                        setattr(model, 'to_' + path[0], t)
            else:
                # plain trigger: bind it directly on every model
                for model in self.models:
                    trig_func = partial(self.events[trigger].trigger, model=model)
                    setattr(model, trigger, trig_func)
        super(HierarchicalMachine, self).add_transition(trigger, source, dest, conditions=conditions, unless=unless,
                                                        prepare=prepare, before=before, after=after, **kwargs)
def on_enter(self, state_name, callback):
self.get_state(state_name).add_callback('enter', callback)
def on_exit(self, state_name, callback):
self.get_state(state_name).add_callback('exit', callback)
def to(self, model, state_name, *args, **kwargs):
event = EventData(self.get_state(model.state), None, self,
model, args=args, kwargs=kwargs)
self._create_transition(model.state, state_name).execute(event)
| true | true |
1c471b8e54efc4a698c7d8da207470a25a2d8196 | 2,348 | py | Python | bin/document_schemas.py | lbianchi-lbl/watertap | 8e727255240464b95c8b81eaead6ed3bd3e61a18 | [
"BSD-3-Clause-LBNL"
] | null | null | null | bin/document_schemas.py | lbianchi-lbl/watertap | 8e727255240464b95c8b81eaead6ed3bd3e61a18 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-09-24T00:33:49.000Z | 2021-09-24T00:33:49.000Z | bin/document_schemas.py | lbianchi-lbl/proteuslib | 8e727255240464b95c8b81eaead6ed3bd3e61a18 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ###############################################################################
# ProteusLib Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/nawi-hub/proteuslib/"
#
###############################################################################
"""
Utility script to document schema files.
Example usage and expected output::
> python document_schemas.py my_output_dir/html/
> ls html
Directory: C:\\Users\\MyName\\my_output_dir\\html
Mode LastWriteTime Length Name
---- ------------- ------ ----
-a---- 5/14/2021 9:00 AM 27345 component.html
-a---- 5/14/2021 9:00 AM 1324 component.json
-a---- 5/14/2021 9:00 AM 18781 reaction.html
-a---- 5/14/2021 9:00 AM 1034 reaction.json
-a---- 5/14/2021 9:00 AM 6391 schema_doc.css
-a---- 5/14/2021 9:00 AM 984 schema_doc.min.js
"""
# stdlib
import argparse
from json_schema_for_humans.generate import generate_from_file_object
import json
from pathlib import Path
import sys
# package
from proteuslib.edb.schemas import schemas
__author__ = "Dan Gunter (LBNL)"
def main():
    """Generate a JSON file and an HTML documentation page for each EDB schema.

    The output directory is taken from the single positional CLI argument.

    Returns
    -------
    int
        0 on success, suitable for use as a process exit code.
    """
    prs = argparse.ArgumentParser(description="Generate schema docs")
    prs.add_argument("directory", help="Directory to put generated schema docs")
    args = prs.parse_args()
    output_dir = Path(args.directory)
    for schema in ("component", "reaction"):
        schema_file = output_dir / f"{schema}.json"
        with schema_file.open("w") as f:
            json.dump(schemas[schema], f)
        # Use context managers so both handles are closed deterministically
        # (the original leaked the reopened schema file and the HTML output).
        with schema_file.open("r") as schema_f, (output_dir / f"{schema}.html").open(
            "w"
        ) as output_file:
            generate_from_file_object(schema_f, output_file)
            print(f"Docs for {schema} at: {output_file.name}")
    return 0
if __name__ == "__main__":
sys.exit(main()) | 36.6875 | 81 | 0.602215 | false | true | |
1c471d10ce8e04eeb505e3f4ed1e1419f490f38a | 3,731 | py | Python | homeassistant/components/simplisafe/binary_sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 5 | 2019-02-24T11:46:18.000Z | 2019-05-28T17:37:21.000Z | homeassistant/components/simplisafe/binary_sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/simplisafe/binary_sensor.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Support for SimpliSafe binary sensors."""
from __future__ import annotations
from simplipy.entity import Entity as SimplipyEntity, EntityTypes
from simplipy.system.v2 import SystemV2
from simplipy.system.v3 import SystemV3
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import SimpliSafe, SimpliSafeBaseSensor
from .const import DATA_CLIENT, DOMAIN, LOGGER
# simplipy entity types that report a battery state; each one gets a companion
# low-battery binary sensor in async_setup_entry.
SUPPORTED_BATTERY_SENSOR_TYPES = [
    EntityTypes.carbon_monoxide,
    EntityTypes.entry,
    EntityTypes.glass_break,
    EntityTypes.leak,
    EntityTypes.lock_keypad,
    EntityTypes.motion,
    EntityTypes.siren,
    EntityTypes.smoke,
    EntityTypes.temperature,
]
# Maps "triggerable" simplipy entity types to the Home Assistant device class
# used for their triggered-state binary sensor.
TRIGGERED_SENSOR_TYPES = {
    EntityTypes.carbon_monoxide: DEVICE_CLASS_GAS,
    EntityTypes.entry: DEVICE_CLASS_DOOR,
    EntityTypes.glass_break: DEVICE_CLASS_SAFETY,
    EntityTypes.leak: DEVICE_CLASS_MOISTURE,
    EntityTypes.motion: DEVICE_CLASS_MOTION,
    EntityTypes.siren: DEVICE_CLASS_SAFETY,
    EntityTypes.smoke: DEVICE_CLASS_SMOKE,
}
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up SimpliSafe binary sensors based on a config entry."""
    simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
    entities: list[BatteryBinarySensor | TriggeredBinarySensor] = []
    for system in simplisafe.systems.values():
        # V2 systems expose no usable sensor data through the API.
        if system.version == 2:
            LOGGER.info("Skipping sensor setup for V2 system: %s", system.system_id)
            continue
        for simplipy_sensor in system.sensors.values():
            sensor_type = simplipy_sensor.type
            if sensor_type in TRIGGERED_SENSOR_TYPES:
                entities.append(
                    TriggeredBinarySensor(
                        simplisafe,
                        system,
                        simplipy_sensor,
                        TRIGGERED_SENSOR_TYPES[sensor_type],
                    )
                )
            if sensor_type in SUPPORTED_BATTERY_SENSOR_TYPES:
                entities.append(
                    BatteryBinarySensor(simplisafe, system, simplipy_sensor)
                )
    async_add_entities(entities)
class TriggeredBinarySensor(SimpliSafeBaseSensor, BinarySensorEntity):
    """Define a binary sensor related to whether an entity has been triggered."""

    def __init__(
        self,
        simplisafe: SimpliSafe,
        system: SystemV2 | SystemV3,
        sensor: SimplipyEntity,
        device_class: str,
    ) -> None:
        """Initialize.

        ``device_class`` is the DEVICE_CLASS_* constant mapped from the
        simplipy entity type (see TRIGGERED_SENSOR_TYPES).
        """
        super().__init__(simplisafe, system, sensor)
        self._attr_device_class = device_class

    @callback
    def async_update_from_rest_api(self) -> None:
        """Update the entity with the provided REST API data."""
        # on/off mirrors the simplipy entity's triggered flag
        self._attr_is_on = self._sensor.triggered
class BatteryBinarySensor(SimpliSafeBaseSensor, BinarySensorEntity):
    """Define a SimpliSafe battery binary sensor entity."""

    _attr_device_class = DEVICE_CLASS_BATTERY

    def __init__(
        self,
        simplisafe: SimpliSafe,
        system: SystemV2 | SystemV3,
        sensor: SimplipyEntity,
    ) -> None:
        """Initialize."""
        super().__init__(simplisafe, system, sensor)
        # suffix distinguishes this entity from the triggered sensor
        # built on the same simplipy entity
        self._attr_unique_id = f"{super().unique_id}-battery"

    @callback
    def async_update_from_rest_api(self) -> None:
        """Update the entity with the provided REST API data."""
        # "on" means the battery is low (DEVICE_CLASS_BATTERY semantics)
        self._attr_is_on = self._sensor.low_battery
| 31.618644 | 84 | 0.693916 | from __future__ import annotations
from simplipy.entity import Entity as SimplipyEntity, EntityTypes
from simplipy.system.v2 import SystemV2
from simplipy.system.v3 import SystemV3
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import SimpliSafe, SimpliSafeBaseSensor
from .const import DATA_CLIENT, DOMAIN, LOGGER
SUPPORTED_BATTERY_SENSOR_TYPES = [
EntityTypes.carbon_monoxide,
EntityTypes.entry,
EntityTypes.glass_break,
EntityTypes.leak,
EntityTypes.lock_keypad,
EntityTypes.motion,
EntityTypes.siren,
EntityTypes.smoke,
EntityTypes.temperature,
]
TRIGGERED_SENSOR_TYPES = {
EntityTypes.carbon_monoxide: DEVICE_CLASS_GAS,
EntityTypes.entry: DEVICE_CLASS_DOOR,
EntityTypes.glass_break: DEVICE_CLASS_SAFETY,
EntityTypes.leak: DEVICE_CLASS_MOISTURE,
EntityTypes.motion: DEVICE_CLASS_MOTION,
EntityTypes.siren: DEVICE_CLASS_SAFETY,
EntityTypes.smoke: DEVICE_CLASS_SMOKE,
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
sensors: list[BatteryBinarySensor | TriggeredBinarySensor] = []
for system in simplisafe.systems.values():
if system.version == 2:
LOGGER.info("Skipping sensor setup for V2 system: %s", system.system_id)
continue
for sensor in system.sensors.values():
if sensor.type in TRIGGERED_SENSOR_TYPES:
sensors.append(
TriggeredBinarySensor(
simplisafe,
system,
sensor,
TRIGGERED_SENSOR_TYPES[sensor.type],
)
)
if sensor.type in SUPPORTED_BATTERY_SENSOR_TYPES:
sensors.append(BatteryBinarySensor(simplisafe, system, sensor))
async_add_entities(sensors)
class TriggeredBinarySensor(SimpliSafeBaseSensor, BinarySensorEntity):
def __init__(
self,
simplisafe: SimpliSafe,
system: SystemV2 | SystemV3,
sensor: SimplipyEntity,
device_class: str,
) -> None:
super().__init__(simplisafe, system, sensor)
self._attr_device_class = device_class
@callback
def async_update_from_rest_api(self) -> None:
self._attr_is_on = self._sensor.triggered
class BatteryBinarySensor(SimpliSafeBaseSensor, BinarySensorEntity):
_attr_device_class = DEVICE_CLASS_BATTERY
def __init__(
self,
simplisafe: SimpliSafe,
system: SystemV2 | SystemV3,
sensor: SimplipyEntity,
) -> None:
super().__init__(simplisafe, system, sensor)
self._attr_unique_id = f"{super().unique_id}-battery"
@callback
def async_update_from_rest_api(self) -> None:
self._attr_is_on = self._sensor.low_battery
| true | true |
1c471d26f4e0f5fcaf6255a003f2d7ce2936056e | 19,223 | py | Python | spectrochempy/core/analysis/simplisma.py | spectrochempy/spectrochempy | 829b290f465e630078785e303dbab197cd78b815 | [
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 44 | 2020-05-14T01:56:40.000Z | 2022-03-23T11:16:30.000Z | spectrochempy/core/analysis/simplisma.py | spectrochempy/spectrochempy | 829b290f465e630078785e303dbab197cd78b815 | [
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 210 | 2020-05-22T17:33:22.000Z | 2022-03-20T16:50:30.000Z | spectrochempy/core/analysis/simplisma.py | spectrochempy/spectrochempy | 829b290f465e630078785e303dbab197cd78b815 | [
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 9 | 2020-05-16T15:36:02.000Z | 2022-03-23T11:16:56.000Z | # -*- coding: utf-8 -*-
#
# =============================================================================
# Copyright (©) 2015-2022 LCS
# Laboratoire Catalyse et Spectrochimie, Caen, France.
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
# See full LICENSE agreement in the root directory
# =============================================================================
"""
This module implement the SIMPLISMA class.
"""
__all__ = ["SIMPLISMA"]
__dataset_methods__ = []
# ----------------------------------------------------------------------------
# imports
# ----------------------------------------------------------------------------
import numpy as np
import warnings
from traitlets import HasTraits, Instance, Unicode
from spectrochempy.core.dataset.nddataset import NDDataset
from spectrochempy.core.dataset.npy import dot
from spectrochempy.core import info_, set_loglevel, INFO
# ============================================================================
# class SIMPLISMA
# ============================================================================
class SIMPLISMA(HasTraits):
    """
    SIMPLe to use Interactive Self-modeling Mixture Analysis.

    This class performs a SIMPLISMA analysis of a 2D |NDDataset|. The algorithm is adapted from Windig's paper,
    Chemometrics and Intelligent Laboratory Systems, 36, 1997, 3-16.

    TODO : adapt to 3DDataset ?
    """

    # Traits holding the analysis inputs and outputs (populated in __init__):
    _St = Instance(NDDataset)  # pure compound spectra
    _C = Instance(NDDataset)  # concentration/contribution profiles
    _X = Instance(NDDataset)  # original dataset
    _Pt = Instance(NDDataset)  # purity spectra
    _s = Instance(NDDataset)  # standard-deviation spectra
    _logs = Unicode  # textual log of the analysis
    def __init__(self, dataset, **kwargs):
        """
        Run the SIMPLISMA analysis on *dataset* and store the results.

        Parameters
        ----------
        dataset : |NDDataset|
            A 2D dataset containing the data matrix (spectra in rows).
        interactive : bool, optional, default=False
            If True, the determination of purest variables is carried out interactively.
        n_pc : int, optional, default=2 in non-interactive mode; 100 in interactive mode
            The maximum number of pure compounds. Used only for non interactive analysis
            (the default in interactive mode (100) will never be reached in practice).
        tol : float, optional, default=0.1
            The convergence criterion on the percent of unexplained variance.
        noise : float or int, optional, default=3
            A correction factor (%) for low intensity variables (0 - no offset, 15 - large offset).
        verbose : bool, optional, default=True
            If True some information is given during the analysis.
        """
        super().__init__()

        # ------------------------------------------------------------------------
        # Utility functions
        # ------------------------------------------------------------------------
        def figures_of_merit(X, maxPIndex, C, St, j):
            # return %explained variance and stdev of residuals when the jth compound is added:
            # C receives the data column of the jth purest variable, St is re-fitted
            # by least squares, and the residual Xhat - X is evaluated.
            C[:, j] = X[:, maxPIndex[j]]
            St[0 : j + 1, :] = np.linalg.lstsq(
                C.data[:, 0 : j + 1], X.data, rcond=None
            )[0]
            Xhat = dot(C[:, 0 : j + 1], St[0 : j + 1, :])
            res = Xhat - X
            stdev_res = np.std(res)
            rsquare = 1 - np.linalg.norm(res) ** 2 / np.linalg.norm(X) ** 2
            return rsquare, stdev_res

        def str_iter_summary(j, index, coord, rsquare, stdev_res, diff):
            # return formatted list of figure of merits at a given iteration
            string = "{:4} {:5} {:8.1f} {:10.4f} {:10.4f} ".format(
                j + 1, index, coord, stdev_res, rsquare
            )
            return string

        def get_x_data(X):
            # x-axis values, or plain indexes when the dataset has no x coordinate
            if X.x is not None and not X.x.is_empty:  # TODO what about labels?
                return X.x.data
            else:
                return np.arange(X.shape[-1])

        # ------------------------------------------------------------------------
        # Check data
        # ------------------------------------------------------------------------
        X = dataset
        if len(X.shape) != 2:
            raise ValueError("For now, SIMPLISMA only handles 2D Datasets")
        if np.min(X.data) < 0:
            warnings.warn("SIMPLISMA does not handle easily negative values.")
            # TODO: check whether negative values should be set to zero or not.

        verbose = kwargs.get("verbose", True)
        if verbose:
            set_loglevel(INFO)
        interactive = kwargs.get("interactive", False)
        tol = kwargs.get("tol", 0.1)
        noise = kwargs.get("noise", 3)
        n_pc = kwargs.get("n_pc", 2)
        if n_pc < 2 or not isinstance(n_pc, int):
            raise ValueError(
                "Oh you did not just... 'MA' in simplisMA stands for Mixture Analysis. "
                "The number of pure compounds should be an integer larger than 2"
            )
        if interactive:
            # large upper bound; the user stops the loop explicitly
            n_pc = 100

        # ------------------------------------------------------------------------
        # Core
        # ------------------------------------------------------------------------
        if not interactive:
            logs = "*** Automatic SIMPL(I)SMA analysis *** \n"
        else:
            logs = "*** Interative SIMPLISMA analysis *** \n"
        logs += "dataset: {}\n".format(X.name)
        logs += " noise: {:2} %\n".format(noise)
        if not interactive:
            logs += " tol: {:2} %\n".format(tol)
            logs += " n_pc: {:2}\n".format(n_pc)
        logs += "\n"
        logs += "#iter index_pc coord_pc Std(res) R^2 \n"
        logs += "---------------------------------------------"
        info_(logs)
        logs += "\n"

        # Containers for returned objects and intermediate data
        # ---------------------------------------------------
        # purity 'spectra' (generally spectra if X is passed,
        # but could also be concentrations if X.T is passed)
        Pt = NDDataset.zeros((n_pc, X.shape[-1]))
        Pt.name = "Purity spectra"
        Pt.set_coordset(y=Pt.y, x=X.x)
        Pt.y.title = "# pure compound"
        # weight matrix
        w = NDDataset.zeros((n_pc, X.shape[-1]))
        w.set_coordset(y=Pt.y, x=X.x)
        # Stdev spectrum
        s = NDDataset.zeros((n_pc, X.shape[-1]))
        s.name = "Standard deviation spectra"
        s.set_coordset(y=Pt.y, x=X.x)
        # maximum purity indexes and coordinates
        maxPIndex = [0] * n_pc
        maxPCoordinate = [0] * n_pc
        # Concentration matrix
        C = NDDataset.zeros((X.shape[-2], n_pc))
        C.name = "Relative Concentrations"
        C.set_coordset(y=X.y, x=C.x)
        C.x.title = "# pure compound"
        # Pure component spectral profiles
        St = NDDataset.zeros((n_pc, X.shape[-1]))
        St.name = "Pure compound spectra"
        St.set_coordset(y=Pt.y, x=X.x)

        # Compute Statistics
        # ------------------
        sigma = np.std(X.data, axis=0)
        mu = np.mean(X.data, axis=0)
        # alpha offsets the mean so low-intensity (noisy) variables are de-emphasized
        alpha = (noise / 100) * np.max(mu.data)
        lamda = np.sqrt(mu ** 2 + sigma ** 2)
        p = sigma / (mu + alpha)

        # scale dataset
        Xscaled = X.data / np.sqrt(mu ** 2 + (sigma + alpha) ** 2)

        # COO dispersion matrix
        COO = (1 / X.shape[-2]) * np.dot(Xscaled.T, Xscaled)

        # Determine the purest variables
        j = 0
        finished = False
        while not finished:
            # compute first purest variable and weights
            if j == 0:
                w[j, :] = lamda ** 2 / (mu ** 2 + (sigma + alpha) ** 2)
                s[j, :] = sigma * w[j, :]
                Pt[j, :] = p * w[j, :]
                # get index and coordinate of pure variable
                maxPIndex[j] = np.argmax(Pt[j, :].data)
                maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                # compute figures of merit
                rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)
                # add summary to log
                llog = str_iter_summary(
                    j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, ""
                )
                logs += llog + "\n"
                if verbose or interactive:
                    print(llog)
                if interactive:
                    # should plot purity and stdev, does not work for the moment
                    # TODO: fix the code below
                    # fig1, (ax1, ax2) = plt.subplots(2,1)
                    # Pt[j, :].plot(ax=ax1)
                    # ax1.set_title('Purity spectrum #{}'.format(j+1))
                    # ax1.axvline(maxPCoordinate[j], color='r')
                    # s[j, :].plot(ax=ax2)
                    # ax2.set_title('standard deviation spectrum #{}'.format(j+1))
                    # ax2.axvline(maxPCoordinate[j], color='r')
                    # plt.show()
                    ans = ""
                    while ans.lower() not in ["a", "c"]:
                        ans = input(" |--> (a) Accept, (c) Change: ")
                    while ans.lower() != "a":
                        new = input(
                            " |--> enter the new index (int) or variable value (float): "
                        )
                        try:
                            new = int(new)
                            maxPIndex[j] = new
                            maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                        except ValueError:
                            try:
                                new = float(new)
                                maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))
                                maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                            except ValueError:
                                print(
                                    "Incorrect answer. Please enter a valid index or value"
                                )
                        rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)
                        llog = str_iter_summary(
                            j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, ""
                        )
                        logs += " |--> changed pure variable #1"
                        logs += llog + "\n"
                        info_(llog)
                        ans = input(" |--> (a) Accept, (c) Change: ")
                    # ans was [a]ccept
                    j += 1
                if not interactive:
                    j += 1
                prev_stdev_res = stdev_res0
            else:
                # compute jth purest variable:
                # w[j, i] is the determinant of the dispersion sub-matrix built from
                # candidate variable i and the previously selected pure variables
                for i in range(X.shape[-1]):
                    Mji = np.zeros((j + 1, j + 1))
                    idx = [i] + maxPIndex[0:j]
                    for line in range(j + 1):
                        for col in range(j + 1):
                            Mji[line, col] = COO[idx[line], idx[col]]
                    w[j, i] = np.linalg.det(Mji)
                # NOTE(review): likely meant Pt[j, :] — Pt[j:] also fills rows > j
                # (harmless here since later rows are recomputed); TODO confirm
                Pt[j:] = p * w[j, :]
                s[j, :] = sigma * w[j, :]
                # get index and coordinate of jth pure variable
                maxPIndex[j] = np.argmax(Pt[j, :].data)
                maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                # compute figures of merit
                rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)
                diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res
                prev_stdev_res = stdev_resj
                # add summary to log
                llog = str_iter_summary(
                    j, maxPIndex[j], maxPCoordinate[j], rsquarej, stdev_resj, diff
                )
                logs += llog + "\n"
                if verbose or interactive:
                    info_(llog)
                if (
                    interactive
                ):  # TODO: I suggest to use jupyter widgets for the interactivity!
                    # should plot purity and stdev, does not work for the moment
                    # TODO: fix the code below
                    # ax1.clear()
                    # ax1.set_title('Purity spectrum #{}'.format(j+1))
                    # Pt[j, :].plot(ax=ax1)
                    # for coord in maxPCoordinate[:-1]:
                    #     ax1.axvline(coord, color='g')
                    # ax1.axvline(maxPCoordinate[j], color='r')
                    # ax2.clear()
                    # ax2.set_title('standard deviation spectrum #{}'.format(j+1))
                    # s[j, :].plot(ax=ax2)
                    # for coord in maxPCoordinate[:-1]:
                    #     ax2.axvline(coord, color='g')
                    # ax2.axvline(maxPCoordinate[j], color='r')
                    # plt.show()
                    ans = ""
                    while ans.lower() not in ["a", "c", "r", "f"]:
                        ans = input(
                            " |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and finish: "
                        )
                    while ans.lower() == "c":
                        new = input(
                            " |--> enter the new index (int) or variable value (float): "
                        )
                        try:
                            new = int(new)
                            maxPIndex[j] = new
                            maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                        except ValueError:
                            try:
                                new = float(new)
                                maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))
                                maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
                            except ValueError:
                                print(
                                    " |--> Incorrect answer. Please enter a valid index or value"
                                )
                        rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)
                        diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res
                        # NOTE(review): no-op statement — probably meant
                        # "prev_stdev_res = stdev_resj"; diff after a change
                        # is computed from a stale baseline. TODO confirm.
                        prev_stdev_res + stdev_resj
                        logs += f" |--> changed pure variable #{j + 1}\n"
                        llog = str_iter_summary(
                            j,
                            maxPIndex[j],
                            maxPCoordinate[j],
                            rsquarej,
                            stdev_resj,
                            "diff",
                        )
                        logs += llog + "\n"
                        info_(llog)
                        info_(
                            f"purest variable #{j + 1} set at index = {maxPIndex[j]} ; x = {maxPCoordinate[j]}"
                        )
                        ans = input(
                            " |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and stop: "
                        )
                    if ans.lower() == "r":
                        maxPCoordinate[j] = 0
                        maxPIndex[j] = 0
                        logs += f" |--> rejected pure variable #{j + 1}\n"
                        j = j - 1
                    elif ans.lower() == "a":
                        j = j + 1
                    elif ans.lower() == "f":
                        finished = True
                        j = j + 1
                        llog = f"\n**** Interrupted by user at compound # {j} \n**** End of SIMPL(I)SMA analysis."
                        logs += llog + "\n"
                        # truncate outputs to the j accepted compounds
                        Pt = Pt[0:j, :]
                        St = St[0:j, :]
                        s = s[0:j, :]
                        C = C[:, 0:j]
                # not interactive
                else:
                    j = j + 1
                    if (1 - rsquarej) < tol / 100:
                        llog = (
                            f"\n**** Unexplained variance lower than 'tol' ({tol}%) \n"
                            "**** End of SIMPL(I)SMA analysis."
                        )
                        logs += llog + "\n"
                        Pt = Pt[0:j, :]
                        St = St[0:j, :]
                        s = s[0:j, :]
                        C = C[:, 0:j]
                        info_(llog)
                        finished = True
            if j == n_pc:
                if not interactive:
                    llog = (
                        f"\n**** Reached maximum number of pure compounds 'n_pc' ({n_pc}) \n"
                        "**** End of SIMPL(I)SMA analysis."
                    )
                    logs += llog + "\n"
                    info_(llog)
                finished = True

        Pt.description = "Purity spectra from SIMPLISMA:\n" + logs
        C.description = "Concentration/contribution matrix from SIMPLISMA:\n" + logs
        St.description = "Pure compound spectra matrix from SIMPLISMA:\n" + logs
        s.description = "Standard deviation spectra matrix from SIMPLISMA:\n" + logs
        self._logs = logs
        self._X = X
        self._Pt = Pt
        self._C = C
        self._St = St
        self._s = s
    @property
    def X(self):
        """
        |NDDataset| - The original input dataset (read-only).
        """
        return self._X
    @property
    def St(self):
        """
        |NDDataset| - Spectra of pure compounds (read-only).
        """
        return self._St
    @property
    def C(self):
        """
        |NDDataset| - Intensities ('concentrations') of pure compounds in spectra (read-only).
        """
        return self._C
    @property
    def Pt(self):
        """
        |NDDataset| - Purity spectra (read-only).
        """
        return self._Pt
    @property
    def s(self):
        """
        |NDDataset| - Standard deviation spectra (read-only).
        """
        return self._s
    @property
    def logs(self):
        """
        str - Textual log of the analysis (read-only).
        """
        return self._logs
def reconstruct(self):
"""
Transform data back to the original space.
The following matrix operation is performed: :math:`X'_{hat} = C'.S'^t`
Returns
-------
X_hat
The reconstructed dataset based on the SIMPLISMA Analysis.
"""
# reconstruct from concentration and spectra profiles
X_hat = dot(self.C, self.St)
X_hat.description = "Dataset reconstructed by SIMPLISMA\n" + self.logs
X_hat.title = "X_hat: " + self.X.title
return X_hat
def plotmerit(self, **kwargs):
"""
Plots the input dataset, reconstructed dataset and residuals.
Parameters
----------
**kwargs : dict
Plotting parameters.
Returns
-------
ax
subplot.
"""
colX, colXhat, colRes = kwargs.get("colors", ["blue", "green", "red"])
X_hat = self.reconstruct()
res = self.X - X_hat
ax = self.X.plot(label="$X$")
ax.plot(X_hat.data.T, color=colXhat, label=r"$\hat{X}")
ax.plot(res.data.T, color=colRes, label="Residual")
ax.set_title("SIMPLISMA plot: " + self.X.name)
return ax
# ============================================================================
if __name__ == "__main__":
    # Library-only module: no script behavior.
    pass
| 36.967308 | 114 | 0.427509 |
__all__ = ["SIMPLISMA"]
__dataset_methods__ = []
import numpy as np
import warnings
from traitlets import HasTraits, Instance, Unicode
from spectrochempy.core.dataset.nddataset import NDDataset
from spectrochempy.core.dataset.npy import dot
from spectrochempy.core import info_, set_loglevel, INFO
class SIMPLISMA(HasTraits):
_St = Instance(NDDataset)
_C = Instance(NDDataset)
_X = Instance(NDDataset)
_Pt = Instance(NDDataset)
_s = Instance(NDDataset)
_logs = Unicode
def __init__(self, dataset, **kwargs):
super().__init__()
def figures_of_merit(X, maxPIndex, C, St, j):
C[:, j] = X[:, maxPIndex[j]]
St[0 : j + 1, :] = np.linalg.lstsq(
C.data[:, 0 : j + 1], X.data, rcond=None
)[0]
Xhat = dot(C[:, 0 : j + 1], St[0 : j + 1, :])
res = Xhat - X
stdev_res = np.std(res)
rsquare = 1 - np.linalg.norm(res) ** 2 / np.linalg.norm(X) ** 2
return rsquare, stdev_res
def str_iter_summary(j, index, coord, rsquare, stdev_res, diff):
string = "{:4} {:5} {:8.1f} {:10.4f} {:10.4f} ".format(
j + 1, index, coord, stdev_res, rsquare
)
return string
def get_x_data(X):
if X.x is not None and not X.x.is_empty:
return X.x.data
else:
return np.arange(X.shape[-1])
X = dataset
if len(X.shape) != 2:
raise ValueError("For now, SIMPLISMA only handles 2D Datasets")
if np.min(X.data) < 0:
warnings.warn("SIMPLISMA does not handle easily negative values.")
verbose = kwargs.get("verbose", True)
if verbose:
set_loglevel(INFO)
interactive = kwargs.get("interactive", False)
tol = kwargs.get("tol", 0.1)
noise = kwargs.get("noise", 3)
n_pc = kwargs.get("n_pc", 2)
if n_pc < 2 or not isinstance(n_pc, int):
raise ValueError(
"Oh you did not just... 'MA' in simplisMA stands for Mixture Analysis. "
"The number of pure compounds should be an integer larger than 2"
)
if interactive:
n_pc = 100
if not interactive:
logs = "*** Automatic SIMPL(I)SMA analysis *** \n"
else:
logs = "*** Interative SIMPLISMA analysis *** \n"
logs += "dataset: {}\n".format(X.name)
logs += " noise: {:2} %\n".format(noise)
if not interactive:
logs += " tol: {:2} %\n".format(tol)
logs += " n_pc: {:2}\n".format(n_pc)
logs += "\n"
logs += "#iter index_pc coord_pc Std(res) R^2 \n"
logs += "---------------------------------------------"
info_(logs)
logs += "\n"
Pt = NDDataset.zeros((n_pc, X.shape[-1]))
Pt.name = "Purity spectra"
Pt.set_coordset(y=Pt.y, x=X.x)
Pt.y.title = "# pure compound"
w = NDDataset.zeros((n_pc, X.shape[-1]))
w.set_coordset(y=Pt.y, x=X.x)
s = NDDataset.zeros((n_pc, X.shape[-1]))
s.name = "Standard deviation spectra"
s.set_coordset(y=Pt.y, x=X.x)
maxPIndex = [0] * n_pc
maxPCoordinate = [0] * n_pc
C = NDDataset.zeros((X.shape[-2], n_pc))
C.name = "Relative Concentrations"
C.set_coordset(y=X.y, x=C.x)
C.x.title = "# pure compound"
St = NDDataset.zeros((n_pc, X.shape[-1]))
St.name = "Pure compound spectra"
St.set_coordset(y=Pt.y, x=X.x)
sigma = np.std(X.data, axis=0)
mu = np.mean(X.data, axis=0)
alpha = (noise / 100) * np.max(mu.data)
lamda = np.sqrt(mu ** 2 + sigma ** 2)
p = sigma / (mu + alpha)
Xscaled = X.data / np.sqrt(mu ** 2 + (sigma + alpha) ** 2)
COO = (1 / X.shape[-2]) * np.dot(Xscaled.T, Xscaled)
j = 0
finished = False
while not finished:
if j == 0:
w[j, :] = lamda ** 2 / (mu ** 2 + (sigma + alpha) ** 2)
s[j, :] = sigma * w[j, :]
Pt[j, :] = p * w[j, :]
maxPIndex[j] = np.argmax(Pt[j, :].data)
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)
llog = str_iter_summary(
j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, ""
)
logs += llog + "\n"
if verbose or interactive:
print(llog)
if interactive:
ans = ""
while ans.lower() not in ["a", "c"]:
ans = input(" |--> (a) Accept, (c) Change: ")
while ans.lower() != "a":
new = input(
" |--> enter the new index (int) or variable value (float): "
)
try:
new = int(new)
maxPIndex[j] = new
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
except ValueError:
try:
new = float(new)
maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
except ValueError:
print(
"Incorrect answer. Please enter a valid index or value"
)
rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)
llog = str_iter_summary(
j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, ""
)
logs += " |--> changed pure variable #1"
logs += llog + "\n"
info_(llog)
ans = input(" |--> (a) Accept, (c) Change: ")
j += 1
if not interactive:
j += 1
prev_stdev_res = stdev_res0
else:
for i in range(X.shape[-1]):
Mji = np.zeros((j + 1, j + 1))
idx = [i] + maxPIndex[0:j]
for line in range(j + 1):
for col in range(j + 1):
Mji[line, col] = COO[idx[line], idx[col]]
w[j, i] = np.linalg.det(Mji)
Pt[j:] = p * w[j, :]
s[j, :] = sigma * w[j, :]
maxPIndex[j] = np.argmax(Pt[j, :].data)
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)
diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res
prev_stdev_res = stdev_resj
llog = str_iter_summary(
j, maxPIndex[j], maxPCoordinate[j], rsquarej, stdev_resj, diff
)
logs += llog + "\n"
if verbose or interactive:
info_(llog)
if (
interactive
):
ans = ""
while ans.lower() not in ["a", "c", "r", "f"]:
ans = input(
" |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and finish: "
)
while ans.lower() == "c":
new = input(
" |--> enter the new index (int) or variable value (float): "
)
try:
new = int(new)
maxPIndex[j] = new
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
except ValueError:
try:
new = float(new)
maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))
maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]
except ValueError:
print(
" |--> Incorrect answer. Please enter a valid index or value"
)
rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)
diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res
prev_stdev_res + stdev_resj
logs += f" |--> changed pure variable #{j + 1}\n"
llog = str_iter_summary(
j,
maxPIndex[j],
maxPCoordinate[j],
rsquarej,
stdev_resj,
"diff",
)
logs += llog + "\n"
info_(llog)
info_(
f"purest variable #{j + 1} set at index = {maxPIndex[j]} ; x = {maxPCoordinate[j]}"
)
ans = input(
" |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and stop: "
)
if ans.lower() == "r":
maxPCoordinate[j] = 0
maxPIndex[j] = 0
logs += f" |--> rejected pure variable #{j + 1}\n"
j = j - 1
elif ans.lower() == "a":
j = j + 1
elif ans.lower() == "f":
finished = True
j = j + 1
llog = f"\n**** Interrupted by user at compound # {j} \n**** End of SIMPL(I)SMA analysis."
logs += llog + "\n"
Pt = Pt[0:j, :]
St = St[0:j, :]
s = s[0:j, :]
C = C[:, 0:j]
else:
j = j + 1
if (1 - rsquarej) < tol / 100:
llog = (
f"\n**** Unexplained variance lower than 'tol' ({tol}%) \n"
"**** End of SIMPL(I)SMA analysis."
)
logs += llog + "\n"
Pt = Pt[0:j, :]
St = St[0:j, :]
s = s[0:j, :]
C = C[:, 0:j]
info_(llog)
finished = True
if j == n_pc:
if not interactive:
llog = (
f"\n**** Reached maximum number of pure compounds 'n_pc' ({n_pc}) \n"
"**** End of SIMPL(I)SMA analysis."
)
logs += llog + "\n"
info_(llog)
finished = True
Pt.description = "Purity spectra from SIMPLISMA:\n" + logs
C.description = "Concentration/contribution matrix from SIMPLISMA:\n" + logs
St.description = "Pure compound spectra matrix from SIMPLISMA:\n" + logs
s.description = "Standard deviation spectra matrix from SIMPLISMA:\n" + logs
self._logs = logs
self._X = X
self._Pt = Pt
self._C = C
self._St = St
self._s = s
    @property
    def X(self):
        """The dataset that was analysed (as passed to the constructor)."""
        return self._X
    @property
    def St(self):
        """Pure compound spectra matrix determined by SIMPLISMA."""
        return self._St
    @property
    def C(self):
        """Concentration/contribution matrix determined by SIMPLISMA."""
        return self._C
    @property
    def Pt(self):
        """Purity spectra determined by SIMPLISMA."""
        return self._Pt
    @property
    def s(self):
        """Standard deviation spectra matrix determined by SIMPLISMA."""
        return self._s
    @property
    def logs(self):
        """Text log accumulated during the SIMPLISMA run."""
        return self._logs
def reconstruct(self):
X_hat = dot(self.C, self.St)
X_hat.description = "Dataset reconstructed by SIMPLISMA\n" + self.logs
X_hat.title = "X_hat: " + self.X.title
return X_hat
def plotmerit(self, **kwargs):
colX, colXhat, colRes = kwargs.get("colors", ["blue", "green", "red"])
X_hat = self.reconstruct()
res = self.X - X_hat
ax = self.X.plot(label="$X$")
ax.plot(X_hat.data.T, color=colXhat, label=r"$\hat{X}")
ax.plot(res.data.T, color=colRes, label="Residual")
ax.set_title("SIMPLISMA plot: " + self.X.name)
return ax
if __name__ == "__main__":
    # module is meant to be imported; no standalone command-line behaviour
    pass
| true | true |
1c471da88d59c2507feef993fda3aee7a9be0942 | 743 | py | Python | oscar/utils/argparse.py | IntelLabs/OSCAR | 25d1dea35727379117e11b7238b5a0d1ed19acad | [
"BSD-3-Clause"
] | 13 | 2021-02-12T18:41:53.000Z | 2022-01-14T07:17:15.000Z | oscar/utils/argparse.py | IntelLabs/OSCAR | 25d1dea35727379117e11b7238b5a0d1ed19acad | [
"BSD-3-Clause"
] | null | null | null | oscar/utils/argparse.py | IntelLabs/OSCAR | 25d1dea35727379117e11b7238b5a0d1ed19acad | [
"BSD-3-Clause"
] | 2 | 2021-03-05T18:27:23.000Z | 2021-03-05T23:16:09.000Z | #
# Copyright (C) 2020 Georgia Institute of Technology. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
from argparse import ArgumentParser, Action, Namespace
from typing import List
class NegateAction(Action):
# adapted from https://stackoverflow.com/a/34736291
def __call__(
self,
parser: ArgumentParser,
namespace: Namespace,
values: List[str],
option: str,
):
setattr(namespace, self.dest, option[2:4] != "no")
@classmethod
def add_to_parser(cls, parser: ArgumentParser, dest: str) -> ArgumentParser:
parser.add_argument(
f"--{dest}", f"--no_{dest}", dest=dest, action=cls, default=True, nargs=0
)
return parser
| 24.766667 | 85 | 0.643338 |
from argparse import ArgumentParser, Action, Namespace
from typing import List
class NegateAction(Action):
    """Argparse action giving a boolean flag with a ``--no_<dest>`` negation."""

    def __call__(
        self,
        parser: ArgumentParser,
        namespace: Namespace,
        values: List[str],
        option: str,
    ):
        # True for "--<dest>", False for "--no_<dest>".  The previous
        # option[2:4] != "no" test broke for dests beginning with "no"
        # (e.g. --notify was treated as a negation).
        setattr(namespace, self.dest, not option.startswith("--no_"))

    @classmethod
    def add_to_parser(cls, parser: ArgumentParser, dest: str) -> ArgumentParser:
        """Add the ``--<dest>``/``--no_<dest>`` pair to *parser* (default True)."""
        parser.add_argument(
            f"--{dest}", f"--no_{dest}", dest=dest, action=cls, default=True, nargs=0
        )
        return parser
| true | true |
1c471eec2870d4cfe9b1cd1a30eaad7b3ab34d2a | 14,023 | py | Python | experiment.py | pawni/sgld_online_approximation | 1edae8a669fdeef4e5501bcb07d6b809fc4cccd9 | [
"MIT"
] | 7 | 2017-04-25T08:49:22.000Z | 2018-05-14T08:42:34.000Z | experiment.py | pawni/sgld_online_approximation | 1edae8a669fdeef4e5501bcb07d6b809fc4cccd9 | [
"MIT"
] | null | null | null | experiment.py | pawni/sgld_online_approximation | 1edae8a669fdeef4e5501bcb07d6b809fc4cccd9 | [
"MIT"
] | 5 | 2017-05-27T07:00:07.000Z | 2020-03-09T04:04:58.000Z | import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
import edward as ed
from edward.models import Normal, Categorical, Multinomial, Empirical, PointMass
from tensorflow.python.training import moving_averages
# setup function to handle session configuration and seeding
def setup():
    """Reset the TF graph, hide GPUs, seed the RNG and open a session."""
    tf.reset_default_graph()
    os.environ['CUDA_VISIBLE_DEVICES'] = ''  # force CPU-only execution
    tf.set_random_seed(42)
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.InteractiveSession(config=session_config)
# function to return data readers - it assumes that the notMNIST dataset has
# been downloaded from https://github.com/davidflanagan/notMNIST-to-MNIST
def get_data():
    """Return (mnist, notmnist) readers with integer (non-one-hot) labels."""
    return (input_data.read_data_sets('MNIST_data', one_hot=False),
            input_data.read_data_sets('notMNIST_data', one_hot=False))
# function to build a NN using a variables dict. If the variables for a 3 layer
# network is present it builds a 3 layer network. Otherwise it builds a 1 layer
# network. If a keep_prob for dropout is given it includes dropout in the model.
def build_nn(variables, dropout=None):
    """Build a fully connected ReLU network from a dict of weight tensors.

    The number of layers is inferred from the number of ``W_i`` keys in
    *variables* (generalizes the previous hard-coded 1-/3-hidden-layer
    special cases; both old layouts behave identically).

    :param variables: dict with input placeholder 'x' and per-layer
        weights/biases 'W_0'/'b_0', 'W_1'/'b_1', ...
    :param dropout: optional keep probability; applied after every hidden
        ReLU, never to the output logits.
    :return: logits tensor of the final linear layer.
    """
    n_layers = sum(1 for key in variables if key.startswith('W_'))
    hidden = tf.reshape(variables['x'], [-1, 784])
    # hidden layers: affine -> ReLU (-> dropout)
    for i in range(n_layers - 1):
        hidden = tf.nn.relu(
            tf.matmul(hidden, variables['W_%d' % i]) + variables['b_%d' % i])
        if dropout:
            hidden = tf.nn.dropout(hidden, keep_prob=dropout)
    # output layer is purely affine
    return (tf.matmul(hidden, variables['W_%d' % (n_layers - 1)])
            + variables['b_%d' % (n_layers - 1)])
# Builds the 1 layer probabilistic model using edward random variables
# returns the output and variables as dictionary
def get_model(dropout=None):
    """One-hidden-layer (784-50-10) Bayesian NN with standard-normal priors.

    Returns the Categorical output variable and the variables dict
    (placeholders 'x'/'y' plus prior weights/biases).
    """
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.int32, shape=[None])
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {'x': x, 'y': y}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.zeros(shape), sigma=tf.ones(shape))
    y_ = Categorical(logits=build_nn(variables, dropout=dropout))
    return y_, variables
# Builds the 3 layer probabilistic model using edward random variables
# returns the output and variables as dictionary
def get_model_3layer(dropout=None):
    """Three-hidden-layer (784-200-200-200-10) Bayesian NN, standard-normal priors.

    Returns the Categorical output variable and the variables dict
    (placeholders 'x'/'y' plus prior weights/biases).
    """
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.int32, shape=[None])
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {'x': x, 'y': y}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.zeros(shape), sigma=tf.ones(shape))
    y_ = Categorical(logits=build_nn(variables, dropout=dropout))
    return y_, variables
# Function to build an ensemble from the random variables and produce tensors
# for calculating the mean classificationa accuracy of the model as well as the
# per-datapoint-disagreement as defined in Lakshminarayanan et al. (2016), Simple and scalable
# predictive uncertainty estimation using deep ensembles
def get_metrics(model_variables, approx_variables, num_samples=10, dropout=None):
    """Build accuracy and disagreement tensors for a sampled ensemble.

    Draws ``num_samples`` weight samples from the approximate posterior in
    ``approx_variables`` (non-approximated entries such as the 'x'/'y'
    placeholders fall back to ``model_variables``) and stacks the resulting
    networks.  Disagreement is the summed KL between each member's softmax
    output and the ensemble mean (Lakshminarayanan et al., 2016).

    :return: (accuracy, disagreement); disagreement is per datapoint.
    """
    eps = 1e-8  # guards the log/division against zero probabilities
    ensemble_model = tf.stack([build_nn(
        {key: approx_variables[key].sample()
         if key in approx_variables else model_variables[key]
         for key in model_variables}, dropout=dropout)
        for _ in range(num_samples)])
    ensemble_preds = tf.nn.softmax(ensemble_model)
    # sum over classes (axis=-1) of KL(member || ensemble mean), then sum
    # over ensemble members (axis=0) -> one scalar per datapoint
    disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds
                                               * tf.log(ensemble_preds
                                                        / (tf.reduce_mean(ensemble_preds, axis=0)
                                                           + eps)
                                                        + eps),
                                               axis=-1),
                                 axis=0)
    # accuracy of the ensemble-averaged prediction against the labels
    accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.cast(
                    tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),
                    tf.int32),
                model_variables['y']),
            tf.float32))
    return accuracy, disagreement
# Function to build an ensemble from the pretrained neural network states and produce tensors
# for calculating the mean classificationa accuracy of the model as well as the
# per-datapoint-disagreement as defined in Lakshminarayanan et al. (2016), Simple and scalable
# predictive uncertainty estimation using deep ensembles
def get_metrics_ensemble(model_variables, approx_variables, num_samples=10, dropout=None):
    """Accuracy/disagreement tensors for an ensemble of pretrained networks.

    Unlike :func:`get_metrics`, ``approx_variables`` here is a list of
    concrete network states; ``num_samples`` of them are picked at random.
    NOTE(review): the ``dropout`` parameter is accepted but not forwarded to
    ``build_nn`` — confirm this is intended.

    :return: (accuracy, disagreement); disagreement is per datapoint.
    """
    eps = 1e-8  # guards the log/division against zero probabilities
    ensemble_model = tf.stack([build_nn(
        {key: approx_variables[i][key]
         if key in approx_variables[i] else model_variables[key]
         for key in model_variables})
        for i in np.random.permutation(len(approx_variables))[:num_samples]])
    ensemble_preds = tf.nn.softmax(ensemble_model)
    # sum over classes of KL(member || ensemble mean), then over members
    disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds
                                               * tf.log(ensemble_preds
                                                        / (tf.reduce_mean(ensemble_preds, axis=0)
                                                           + eps)
                                                        + eps),
                                               axis=-1),
                                 axis=0)
    # accuracy of the ensemble-averaged prediction against the labels
    accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.cast(
                    tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),
                    tf.int32),
                model_variables['y']),
            tf.float32))
    return accuracy, disagreement
# function to run our proposed outlier detection based on disagreement thresholding.
# returns the number of correctly / incorrectly classified samples
def get_outlier_stats(model_variables, disagreement, mnist, notmnist):
    """Outlier detection by thresholding the ensemble disagreement.

    The threshold is mean + 3*std of the disagreement on one MNIST training
    batch; notMNIST test samples above it are true positives, MNIST test
    samples above it are false positives.

    :return: dict with counts 'TP', 'FN', 'FP', 'TN'.
    """
    x_ph, y_ph = model_variables['x'], model_variables['y']

    def _scores(images, labels):
        # evaluate the per-datapoint disagreement tensor on the given data
        return disagreement.eval({x_ph: images, y_ph: labels})

    images, labels = mnist.train.next_batch(100)
    train_scores = _scores(images, labels)
    threshold = train_scores.mean() + 3. * train_scores.std()
    mnist_flags = _scores(mnist.test.images, mnist.test.labels) > threshold
    notmnist_flags = _scores(notmnist.test.images, notmnist.test.labels) > threshold
    return {'TP': np.sum(notmnist_flags),
            'FN': np.sum(1 - notmnist_flags),
            'FP': np.sum(mnist_flags),
            'TN': np.sum(1 - mnist_flags),
            }
# function to return the variables for approximating the 1 layer model using variational inference
def get_vi_approximation_variables():
    """Mean-field Gaussian variational posterior for the 1-layer model.

    The iteration order matches the original explicit construction, so op
    creation (and hence random initialisation under a fixed seed) is
    unchanged.
    """
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    posterior = {}
    for name, shape in layout:
        # free location; scale kept positive through a softplus
        posterior[name] = Normal(
            mu=tf.Variable(tf.random_normal(shape, stddev=0.1)),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal(shape, stddev=0.1))))
    return posterior
# function to return the variables for approximating the 3 layer model using variational inference
def get_vi_approximation_variables_3layer():
    """Mean-field Gaussian variational posterior for the 3-layer model.

    Iteration order matches the original explicit construction so op
    creation (and random initialisation under a fixed seed) is unchanged.
    """
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    posterior = {}
    for name, shape in layout:
        # free location; scale kept positive through a softplus
        posterior[name] = Normal(
            mu=tf.Variable(tf.random_normal(shape, stddev=0.1)),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal(shape, stddev=0.1))))
    return posterior
# function to return the variables for approximating the 1 layer model using our online approximation of sampling methods
def get_gauss_approximation_variables():
    """Zero-initialised Gaussians for the online approximation (1-layer model)."""
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.Variable(tf.zeros(shape)),
                                 sigma=tf.Variable(tf.zeros(shape)))
    return variables
# function to return the variables for approximating the 3 layer model using our online approximation of sampling methods
def get_gauss_approximation_variables_3layer():
    """Zero-initialised Gaussians for the online approximation (3-layer model)."""
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.Variable(tf.zeros(shape)),
                                 sigma=tf.Variable(tf.zeros(shape)))
    return variables
# function to return the variables for approximating the 1 layer model using MAP
def get_pointmass_approximation_variables():
    """Point-mass (MAP) approximation variables for the 1-layer model."""
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = PointMass(tf.Variable(tf.random_normal(shape, stddev=0.1)))
    return variables
# function to return the variables for approximating the 3 layer model using MAP
def get_pointmass_approximation_variables_3layer():
    """Point-mass (MAP) approximation variables for the 3-layer model."""
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = PointMass(tf.Variable(tf.random_normal(shape, stddev=0.1)))
    return variables
| 52.718045 | 121 | 0.617343 | import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
import edward as ed
from edward.models import Normal, Categorical, Multinomial, Empirical, PointMass
from tensorflow.python.training import moving_averages
def setup():
    """Reset the TF graph, hide GPUs, seed the RNG and open a session."""
    tf.reset_default_graph()
    os.environ['CUDA_VISIBLE_DEVICES'] = ''  # force CPU-only execution
    tf.set_random_seed(42)
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.InteractiveSession(config=session_config)
def get_data():
    """Return (mnist, notmnist) readers with integer (non-one-hot) labels."""
    return (input_data.read_data_sets('MNIST_data', one_hot=False),
            input_data.read_data_sets('notMNIST_data', one_hot=False))
def build_nn(variables, dropout=None):
    """Build a fully connected ReLU network from a dict of weight tensors.

    The number of layers is inferred from the number of ``W_i`` keys in
    *variables* (generalizes the previous hard-coded 1-/3-hidden-layer
    special cases; both old layouts behave identically).

    :param variables: dict with input placeholder 'x' and per-layer
        weights/biases 'W_0'/'b_0', 'W_1'/'b_1', ...
    :param dropout: optional keep probability; applied after every hidden
        ReLU, never to the output logits.
    :return: logits tensor of the final linear layer.
    """
    n_layers = sum(1 for key in variables if key.startswith('W_'))
    hidden = tf.reshape(variables['x'], [-1, 784])
    # hidden layers: affine -> ReLU (-> dropout)
    for i in range(n_layers - 1):
        hidden = tf.nn.relu(
            tf.matmul(hidden, variables['W_%d' % i]) + variables['b_%d' % i])
        if dropout:
            hidden = tf.nn.dropout(hidden, keep_prob=dropout)
    # output layer is purely affine
    return (tf.matmul(hidden, variables['W_%d' % (n_layers - 1)])
            + variables['b_%d' % (n_layers - 1)])
def get_model(dropout=None):
    """One-hidden-layer (784-50-10) Bayesian NN with standard-normal priors."""
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.int32, shape=[None])
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {'x': x, 'y': y}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.zeros(shape), sigma=tf.ones(shape))
    y_ = Categorical(logits=build_nn(variables, dropout=dropout))
    return y_, variables
def get_model_3layer(dropout=None):
    """Three-hidden-layer (784-200-200-200-10) Bayesian NN, standard-normal priors."""
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.int32, shape=[None])
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {'x': x, 'y': y}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.zeros(shape), sigma=tf.ones(shape))
    y_ = Categorical(logits=build_nn(variables, dropout=dropout))
    return y_, variables
def get_metrics(model_variables, approx_variables, num_samples=10, dropout=None):
    """Accuracy and per-datapoint ensemble disagreement for sampled posteriors.

    Draws ``num_samples`` weight samples from ``approx_variables`` (other
    keys such as 'x'/'y' fall back to ``model_variables``) and stacks the
    resulting networks.  Disagreement is the summed KL between each member's
    softmax output and the ensemble mean.
    """
    eps = 1e-8  # guards the log/division against zero probabilities
    ensemble_model = tf.stack([build_nn(
        {key: approx_variables[key].sample()
         if key in approx_variables else model_variables[key]
         for key in model_variables}, dropout=dropout)
        for _ in range(num_samples)])
    ensemble_preds = tf.nn.softmax(ensemble_model)
    # KL summed over classes, then over ensemble members -> per datapoint
    disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds
                                               * tf.log(ensemble_preds
                                                        / (tf.reduce_mean(ensemble_preds, axis=0)
                                                           + eps)
                                                        + eps),
                                               axis=-1),
                                 axis=0)
    # accuracy of the ensemble-averaged prediction
    accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.cast(
                    tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),
                    tf.int32),
                model_variables['y']),
            tf.float32))
    return accuracy, disagreement
def get_metrics_ensemble(model_variables, approx_variables, num_samples=10, dropout=None):
    """Accuracy and per-datapoint disagreement for an ensemble of pretrained nets.

    ``approx_variables`` is a list of concrete network states; ``num_samples``
    of them are chosen at random.  NOTE(review): ``dropout`` is accepted but
    not forwarded to ``build_nn`` — confirm intended.
    """
    eps = 1e-8  # guards the log/division against zero probabilities
    ensemble_model = tf.stack([build_nn(
        {key: approx_variables[i][key]
         if key in approx_variables[i] else model_variables[key]
         for key in model_variables})
        for i in np.random.permutation(len(approx_variables))[:num_samples]])
    ensemble_preds = tf.nn.softmax(ensemble_model)
    # KL summed over classes, then over ensemble members -> per datapoint
    disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds
                                               * tf.log(ensemble_preds
                                                        / (tf.reduce_mean(ensemble_preds, axis=0)
                                                           + eps)
                                                        + eps),
                                               axis=-1),
                                 axis=0)
    # accuracy of the ensemble-averaged prediction
    accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.cast(
                    tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),
                    tf.int32),
                model_variables['y']),
            tf.float32))
    return accuracy, disagreement
def get_outlier_stats(model_variables, disagreement, mnist, notmnist):
    """Outlier detection by thresholding the ensemble disagreement.

    Threshold = mean + 3*std of the disagreement on one MNIST training
    batch; notMNIST test samples above it count as TP, MNIST test samples
    above it as FP.  Returns a dict with 'TP', 'FN', 'FP', 'TN'.
    """
    x_ph, y_ph = model_variables['x'], model_variables['y']

    def _scores(images, labels):
        # evaluate the per-datapoint disagreement tensor on the given data
        return disagreement.eval({x_ph: images, y_ph: labels})

    images, labels = mnist.train.next_batch(100)
    train_scores = _scores(images, labels)
    threshold = train_scores.mean() + 3. * train_scores.std()
    mnist_flags = _scores(mnist.test.images, mnist.test.labels) > threshold
    notmnist_flags = _scores(notmnist.test.images, notmnist.test.labels) > threshold
    return {'TP': np.sum(notmnist_flags),
            'FN': np.sum(1 - notmnist_flags),
            'FP': np.sum(mnist_flags),
            'TN': np.sum(1 - mnist_flags),
            }
def get_vi_approximation_variables():
    """Mean-field Gaussian variational posterior for the 1-layer model."""
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    posterior = {}
    for name, shape in layout:
        # free location; scale kept positive through a softplus
        posterior[name] = Normal(
            mu=tf.Variable(tf.random_normal(shape, stddev=0.1)),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal(shape, stddev=0.1))))
    return posterior
def get_vi_approximation_variables_3layer():
    """Mean-field Gaussian variational posterior for the 3-layer model."""
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    posterior = {}
    for name, shape in layout:
        # free location; scale kept positive through a softplus
        posterior[name] = Normal(
            mu=tf.Variable(tf.random_normal(shape, stddev=0.1)),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal(shape, stddev=0.1))))
    return posterior
def get_gauss_approximation_variables():
    """Zero-initialised Gaussians for the online approximation (1-layer model)."""
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.Variable(tf.zeros(shape)),
                                 sigma=tf.Variable(tf.zeros(shape)))
    return variables
def get_gauss_approximation_variables_3layer():
    """Zero-initialised Gaussians for the online approximation (3-layer model)."""
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = Normal(mu=tf.Variable(tf.zeros(shape)),
                                 sigma=tf.Variable(tf.zeros(shape)))
    return variables
def get_pointmass_approximation_variables():
    """Point-mass (MAP) approximation variables for the 1-layer model."""
    layout = (('W_0', [784, 50]), ('W_1', [50, 10]), ('b_0', [50]), ('b_1', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = PointMass(tf.Variable(tf.random_normal(shape, stddev=0.1)))
    return variables
def get_pointmass_approximation_variables_3layer():
    """Point-mass (MAP) approximation variables for the 3-layer model."""
    layout = (('W_0', [784, 200]), ('W_1', [200, 200]),
              ('W_2', [200, 200]), ('W_3', [200, 10]),
              ('b_0', [200]), ('b_1', [200]), ('b_2', [200]), ('b_3', [10]))
    variables = {}
    for name, shape in layout:
        variables[name] = PointMass(tf.Variable(tf.random_normal(shape, stddev=0.1)))
    return variables
| true | true |
1c472050daedf1a77010a344d73d87639472c613 | 7,577 | py | Python | src/opnsense/scripts/netflow/lib/flowparser.py | ppmathis/opnsense-core | ffd506037a66804755cc3a7b3536a0a9450c10aa | [
"BSD-2-Clause"
] | null | null | null | src/opnsense/scripts/netflow/lib/flowparser.py | ppmathis/opnsense-core | ffd506037a66804755cc3a7b3536a0a9450c10aa | [
"BSD-2-Clause"
] | null | null | null | src/opnsense/scripts/netflow/lib/flowparser.py | ppmathis/opnsense-core | ffd506037a66804755cc3a7b3536a0a9450c10aa | [
"BSD-2-Clause"
] | null | null | null | """
Copyright (c) 2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
flowd log parser
"""
import struct
from socket import inet_ntop, AF_INET, AF_INET6, ntohl
class FlowParser:
    """Iterator over the binary records of a flowd log file.

    Each record consists of an 8-byte header (version, length in 32-bit
    words, reserved, field bitmask) followed by the fields flagged in the
    bitmask, in the order of ``field_definition_order``.
    """
    # fields in order of appearance, use bitmask compare
    # (the position in this list is the bit index in the record's bitmask)
    field_definition_order = [
        'tag',
        'recv_time',
        'proto_flags_tos',
        'agent_addr4',
        'agent_addr6',
        'src_addr4',
        'src_addr6',
        'dst_addr4',
        'dst_addr6',
        'gateway_addr4',
        'gateway_addr6',
        'srcdst_port',
        'packets',
        'octets',
        'if_indices',
        'agent_info',
        'flow_times',
        'as_info',
        'flow_engine_info'
    ]
    # extract definition, integer values are read as rawdata (not parsed)
    # (integers give a raw byte length, e.g. 4/16 for IPv4/IPv6 addresses;
    # strings are struct formats, '>' meaning big-endian)
    field_definition = {
        'tag': 'I',
        'recv_time': '>II',
        'proto_flags_tos': 'BBBB',
        'agent_addr4': 4,
        'agent_addr6': 16,
        'src_addr4': 4,
        'src_addr6': 16,
        'dst_addr4': 4,
        'dst_addr6': 16,
        'gateway_addr4': 4,
        'gateway_addr6': 16,
        'srcdst_port': '>HH',
        'packets': '>Q',
        'octets': '>Q',
        'if_indices': '>II',
        'agent_info': '>IIIHH',
        'flow_times': '>II',
        'as_info': 'IIBBH',
        'flow_engine_info': 'HHII'
    }
    def __init__(self, filename, recv_stamp=None):
        """
        :param filename: flowd log file to parse
        :param recv_stamp: optional receive timestamp; records received
            before it are skipped while iterating
        """
        self._filename = filename
        self._recv_stamp = recv_stamp
        # cache formatter vs byte length
        self._fmt_cache = dict()
        # pre-calculate powers of 2 (bitmask value per field position)
        self._pow = dict()
        for idx in range(len(self.field_definition_order)):
            self._pow[idx] = pow(2, idx)
    def calculate_size(self, fmt):
        """Return the packed byte size of struct format *fmt* (memoised).

        Characters without a size entry (e.g. the '>' byte-order marker)
        contribute zero bytes.
        """
        if fmt not in self._fmt_cache:
            fmts = {'B': 1, 'H': 2, 'I': 4, 'Q': 8}
            self._fmt_cache[fmt] = 0
            for key in fmt:
                if key in fmts:
                    self._fmt_cache[fmt] += fmts[key]
        return self._fmt_cache[fmt]
    def _parse_binary(self, raw_data, data_fields):
        """ parse binary record
        :param raw_data: binary data record
        :param data_fields: field bitmask, provided by header
        :return: dict (field name -> raw bytes for address fields,
            unpacked value(s) otherwise)
        """
        raw_data_idx = 0
        raw_record = dict()
        for idx in range(len(self.field_definition_order)):
            if self._pow[idx] & data_fields:
                fieldname = self.field_definition_order[idx]
                if fieldname in self.field_definition:
                    if type(self.field_definition[fieldname]) is int:
                        # integer spec: keep raw bytes (IP addresses)
                        fsize = self.field_definition[fieldname]
                        raw_record[fieldname] = raw_data[raw_data_idx:raw_data_idx + fsize]
                    else:
                        # struct spec: unpack; scalars are unwrapped
                        fsize = self.calculate_size(self.field_definition[fieldname])
                        content = struct.unpack(
                            self.field_definition[fieldname],
                            raw_data[raw_data_idx:raw_data_idx + fsize]
                        )
                        raw_record[fieldname] = content[0] if len(content) == 1 else content
                    raw_data_idx += fsize
        return raw_record
    def __iter__(self):
        """ iterate flowd log file
        :return: generator yielding one dict per flow record, augmented
            with derived keys (recv, flow_start, flow_end, duration_ms, ...)
        """
        # pre-compile address formatters to save time
        with open(self._filename, 'rb') as flowh:
            while True:
                # header [version, len_words, reserved, fields]
                hdata = flowh.read(8)
                if hdata == b'':
                    break
                header = struct.unpack('BBHI', hdata)
                record = self._parse_binary(
                    raw_data=flowh.read(header[1] * 4),
                    data_fields=ntohl(header[3])
                )
                if 'recv_time' not in record or 'agent_info' not in record:
                    # XXX invalid (empty?) flow record.
                    continue
                record['recv_sec'] = record['recv_time'][0]
                if self._recv_stamp is not None and record['recv_sec'] < self._recv_stamp:
                    # self._recv_stamp can contain the last received timestamp, in which case
                    # we should not return older data. The exact timestamp will be returned, so the
                    # consumer knows it doesn't have to read other, older, flowd log files
                    continue
                record['sys_uptime_ms'] = record['agent_info'][0]
                record['netflow_ver'] = record['agent_info'][3]
                record['recv'] = record['recv_sec']
                record['recv_usec'] = record['recv_time'][1]
                # split packed tuples into their named components
                if 'proto_flags_tos' in record:
                    record['tcp_flags'] = record['proto_flags_tos'][0]
                    record['protocol'] = record['proto_flags_tos'][1]
                    record['tos'] = record['proto_flags_tos'][2]
                if 'flow_times' in record:
                    record['flow_start'] = record['flow_times'][0]
                    record['flow_finish'] = record['flow_times'][1]
                if 'if_indices' in record:
                    record['if_ndx_in'] = record['if_indices'][0]
                    record['if_ndx_out'] = record['if_indices'][1]
                if 'srcdst_port' in record:
                    record['src_port'] = record['srcdst_port'][0]
                    record['dst_port'] = record['srcdst_port'][1]
                # concat ipv4/v6 fields into field without [4,6]
                for key in self.field_definition_order:
                    if key in record:
                        if key[-1] == '4':
                            record[key[:-1]] = inet_ntop(AF_INET, record[key])
                        elif key[-1] == '6':
                            record[key[:-1]] = inet_ntop(AF_INET6, record[key])
                # calculated values: flow times are agent-uptime based (ms);
                # anchor them to the wall-clock receive time
                record['flow_end'] = record['recv_sec'] - (record['sys_uptime_ms'] - record['flow_finish']) / 1000.0
                record['duration_ms'] = (record['flow_finish'] - record['flow_start'])
                record['flow_start'] = record['flow_end'] - record['duration_ms'] / 1000.0
                yield record
| 41.631868 | 116 | 0.551538 | import struct
from socket import inet_ntop, AF_INET, AF_INET6, ntohl
class FlowParser:
    """Iterator over the binary records of a flowd log file."""
    # fields in record order; the position in this list is the bit index in
    # the per-record field bitmask
    field_definition_order = [
        'tag',
        'recv_time',
        'proto_flags_tos',
        'agent_addr4',
        'agent_addr6',
        'src_addr4',
        'src_addr6',
        'dst_addr4',
        'dst_addr6',
        'gateway_addr4',
        'gateway_addr6',
        'srcdst_port',
        'packets',
        'octets',
        'if_indices',
        'agent_info',
        'flow_times',
        'as_info',
        'flow_engine_info'
    ]
    # per-field extraction spec: integers are raw byte lengths (addresses
    # kept as bytes), strings are struct formats ('>' = big-endian)
    field_definition = {
        'tag': 'I',
        'recv_time': '>II',
        'proto_flags_tos': 'BBBB',
        'agent_addr4': 4,
        'agent_addr6': 16,
        'src_addr4': 4,
        'src_addr6': 16,
        'dst_addr4': 4,
        'dst_addr6': 16,
        'gateway_addr4': 4,
        'gateway_addr6': 16,
        'srcdst_port': '>HH',
        'packets': '>Q',
        'octets': '>Q',
        'if_indices': '>II',
        'agent_info': '>IIIHH',
        'flow_times': '>II',
        'as_info': 'IIBBH',
        'flow_engine_info': 'HHII'
    }
def __init__(self, filename, recv_stamp=None):
self._filename = filename
self._recv_stamp = recv_stamp
self._fmt_cache = dict()
self._pow = dict()
for idx in range(len(self.field_definition_order)):
self._pow[idx] = pow(2, idx)
def calculate_size(self, fmt):
if fmt not in self._fmt_cache:
fmts = {'B': 1, 'H': 2, 'I': 4, 'Q': 8}
self._fmt_cache[fmt] = 0
for key in fmt:
if key in fmts:
self._fmt_cache[fmt] += fmts[key]
return self._fmt_cache[fmt]
def _parse_binary(self, raw_data, data_fields):
raw_data_idx = 0
raw_record = dict()
for idx in range(len(self.field_definition_order)):
if self._pow[idx] & data_fields:
fieldname = self.field_definition_order[idx]
if fieldname in self.field_definition:
if type(self.field_definition[fieldname]) is int:
fsize = self.field_definition[fieldname]
raw_record[fieldname] = raw_data[raw_data_idx:raw_data_idx + fsize]
else:
fsize = self.calculate_size(self.field_definition[fieldname])
content = struct.unpack(
self.field_definition[fieldname],
raw_data[raw_data_idx:raw_data_idx + fsize]
)
raw_record[fieldname] = content[0] if len(content) == 1 else content
raw_data_idx += fsize
return raw_record
def __iter__(self):
with open(self._filename, 'rb') as flowh:
while True:
hdata = flowh.read(8)
if hdata == b'':
break
header = struct.unpack('BBHI', hdata)
record = self._parse_binary(
raw_data=flowh.read(header[1] * 4),
data_fields=ntohl(header[3])
)
if 'recv_time' not in record or 'agent_info' not in record:
continue
record['recv_sec'] = record['recv_time'][0]
if self._recv_stamp is not None and record['recv_sec'] < self._recv_stamp:
continue
record['sys_uptime_ms'] = record['agent_info'][0]
record['netflow_ver'] = record['agent_info'][3]
record['recv'] = record['recv_sec']
record['recv_usec'] = record['recv_time'][1]
if 'proto_flags_tos' in record:
record['tcp_flags'] = record['proto_flags_tos'][0]
record['protocol'] = record['proto_flags_tos'][1]
record['tos'] = record['proto_flags_tos'][2]
if 'flow_times' in record:
record['flow_start'] = record['flow_times'][0]
record['flow_finish'] = record['flow_times'][1]
if 'if_indices' in record:
record['if_ndx_in'] = record['if_indices'][0]
record['if_ndx_out'] = record['if_indices'][1]
if 'srcdst_port' in record:
record['src_port'] = record['srcdst_port'][0]
record['dst_port'] = record['srcdst_port'][1]
# concat ipv4/v6 fields into field without [4,6]
for key in self.field_definition_order:
if key in record:
if key[-1] == '4':
record[key[:-1]] = inet_ntop(AF_INET, record[key])
elif key[-1] == '6':
record[key[:-1]] = inet_ntop(AF_INET6, record[key])
# calculated values
record['flow_end'] = record['recv_sec'] - (record['sys_uptime_ms'] - record['flow_finish']) / 1000.0
record['duration_ms'] = (record['flow_finish'] - record['flow_start'])
record['flow_start'] = record['flow_end'] - record['duration_ms'] / 1000.0
yield record
| true | true |
1c472076ed04a3d5c186ecdaba2d82b57688477b | 55,537 | py | Python | methylcheck/qc_plot.py | LifeEGX/methQC | 2b4f960e7e5c7baca9dc778ca05ee332e2f27653 | [
"MIT"
] | 2 | 2019-10-13T21:42:14.000Z | 2019-10-16T19:08:49.000Z | methylcheck/qc_plot.py | LifeEGX/methQC | 2b4f960e7e5c7baca9dc778ca05ee332e2f27653 | [
"MIT"
] | 9 | 2019-07-15T18:56:51.000Z | 2019-07-23T17:25:53.000Z | methylcheck/qc_plot.py | LifeEGX/methQC | 2b4f960e7e5c7baca9dc778ca05ee332e2f27653 | [
"MIT"
] | 1 | 2020-01-09T14:26:02.000Z | 2020-01-09T14:26:02.000Z | import warnings
from pathlib import Path
import logging
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
#app
import methylcheck
from .progress_bar import *
LOGGER = logging.getLogger(__name__)
__all__ = ['run_qc', 'plot_beta_by_type', 'qc_signal_intensity', 'plot_M_vs_U', 'plot_controls', 'bis_conversion_control']
def run_qc(path):
    """Generate every QC plot for a batch of methylprep-processed samples.

    ``path`` must be a folder holding the output of ``methylprep process --all``:
        - beta_values.pkl
        - control_probes.pkl
        - meth_values.pkl / unmeth_values.pkl (or their noob_ equivalents)
        - poobah_values.pkl (optional; color-codes plots when present)

    All output is rendered to screen, so this works best inside a jupyter
    notebook; use ReportPDF for a PDF instead. Only the given folder is
    checked -- there is no recursive search for these files.

    Raises FileNotFoundError when the path is invalid, is not a directory,
    or the required pickle files are missing.
    """
    def _fp(name):
        # resolve a pickle filename inside the target folder
        return Path(path, name).expanduser()
    try:
        beta_df = pd.read_pickle(_fp('beta_values.pkl'))
        controls = pd.read_pickle(_fp('control_probes.pkl'))
        # prefer raw intensities; fall back to the noob-corrected files
        if _fp('meth_values.pkl').exists() and _fp('unmeth_values.pkl').exists():
            meth_df = pd.read_pickle(_fp('meth_values.pkl'))
            unmeth_df = pd.read_pickle(_fp('unmeth_values.pkl'))
        else:
            meth_df = pd.read_pickle(_fp('noob_meth_values.pkl'))
            unmeth_df = pd.read_pickle(_fp('noob_unmeth_values.pkl'))
        poobah = pd.read_pickle(_fp('poobah_values.pkl')) if _fp('poobah_values.pkl').exists() else None
    except FileNotFoundError:
        if not Path(path).exists():
            raise FileNotFoundError("Invalid path")
        if not Path(path).is_dir():
            raise FileNotFoundError("Path is not a directory.")
        raise FileNotFoundError("Files missing. run_qc() only works if you used `methylprep process --all` option to produce beta_values, control_probes, meth_values, and unmeth_values files.")
    # needs meth_df, unmeth_df, controls, and beta_df
    # if passing in a path, it will auto-search for poobah. but if meth/unmeth passed in, you must explicitly tell it to look.
    plot_M_vs_U(meth=meth_df, unmeth=unmeth_df, poobah=poobah)
    qc_signal_intensity(meth=meth_df, unmeth=unmeth_df, poobah=poobah)
    plot_controls(controls, 'all')
    plot_beta_by_type(beta_df, 'all')
def qc_signal_intensity(data_containers=None, path=None, meth=None, unmeth=None, poobah=None, palette=None,
    noob=True, silent=False, verbose=False, plot=True, cutoff_line=True, bad_sample_cutoff=11.5, return_fig=False):
    """Suggests sample outliers based on methylated and unmethylated signal intensity.
    input (one of these):
    =====================
    path
        to csv files processed using methylprep
        these have "noob_meth" and "noob_unmeth" columns per sample file this function can use.
        if you want it to processed data uncorrected data.
    data_containers
        output from the methylprep.run_pipeline() command when run in a script or notebook.
        you can also recreate the list of datacontainers using methylcheck.load(<filepath>,'meth')
    (meth and unmeth)
        if you chose `process --all` you can load the raw intensities like this, and pass them in:
        meth = pd.read_pickle('meth_values.pkl')
        unmeth = pd.read_pickle('unmeth_values.pkl')
        THIS will run the fastest.
    (meth and unmeth and poobah)
        if poobah=None (default): Does nothing
        if poobah=False: suppresses this color
        if poobah=dataframe: color-codes samples according to percent probe failure range,
            but only if you pass in meth and unmeth dataframes too, not data_containers object.
        if poobah=True: looks for poobah_values.pkl in the path provided.
    optional params:
    ================
    cutoff_line: True will draw the line; False omits it.
    bad_sample_cutoff (default 11.5): set the cutoff for determining good vs bad samples, based on signal intensities of meth and unmeth fluorescence channels. 10.5 was borrowed from minfi's internal defaults.
    noob: use noob-corrected meth/unmeth values
    verbose: additional messages
    plot: if True (default), shows a plot. if False, this function returns the median values per sample of meth and unmeth probes.
    return_fig (False default), if True, and plot is True, returns a figure object instead of showing plot.
    compare: if the processed data contains both noob and uncorrected values, it will plot both in different colors
    palette: if using poobah to color code, you can specify a Seaborn palette to use.
    this will draw a diagonal line on plots
    returns:
    ========
        A dictionary of data about good/bad samples based on signal intensity
    TODO:
        doesn't return both types of data if using compare and not plotting
        doesn't give good error message for compare
    """
    if not path and not data_containers and type(meth) is type(None) and type(unmeth) is type(None):
        print("ERROR: You must specify a path to methylprep processed data files or provide a data_containers object as input.")
        return
    if not isinstance(data_containers,list) and isinstance(data_containers, (str,Path)):
        print("ERROR: If you want to supply a path to your processed files, use 'path=<path>'.")
        return
    # path can be a string, but must be converted to a Path
    if isinstance(path, str):
        path = Path(path)
    # meth can be none, or df, or path
    if isinstance(meth, type(None)) and isinstance(unmeth, type(None)):
        meth, unmeth = _get_data(data_containers=data_containers, path=path, compare=False, noob=noob, verbose=verbose)
    if (path is not None and not isinstance(poobah, pd.DataFrame)
        and not isinstance(poobah, type(None))):
        if poobah in (False,None):
            pass # unless poobah IS a dataframe below, nothing happens. None/False suppress this
        else:
            # poobah=True: auto-discover the detection p-value pickle under path
            if 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:
                poobah = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])
            else:
                if verbose and not silent:
                    LOGGER.info("Cannot load poobah_values.pkl file.")
    # Plotting
    medians = _make_qc_df(meth,unmeth)
    # midpoint of each sample's log2 meth/unmeth medians; below cutoff => "bad"
    cutoffs = (medians.mMed.values + medians.uMed.values)/2
    bad_samples = medians.index[cutoffs < bad_sample_cutoff]
    # flex the x and y axes depending on the data
    min_x = int(min(medians.mMed))
    max_x = max(medians.mMed) + 1
    min_y = int(min(medians.uMed))
    max_y = max(medians.uMed) + 1
    if not plot:
        # no plotting: return the classification data only
        return {
            'medians': medians,
            'cutoffs': cutoffs,
            'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],
            'bad_samples': [str(s) for s in bad_samples],
            'bad_sample_cutoff': bad_sample_cutoff,
        }
    # set up figure
    fig,ax = plt.subplots(figsize=(10,10))
    plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')
    plt.xlabel('Meth Median Intensity (log2)', fontsize='large')
    plt.ylabel('Unmeth Median Intensity (log2)', fontsize='large')
    if not isinstance(poobah, pd.DataFrame):
        # no poobah data: simple good (black) vs bad (red) scatter
        plt.title('Log M versus U plot')
        # bad values
        plt.scatter(x='mMed',y='uMed',data=medians[medians.index.isin(bad_samples)],label='Bad Samples',c='red')
        # good values
        plt.scatter(x='mMed',y='uMed',data=medians[~medians.index.isin(bad_samples)],label="Good Samples",c='black')
    elif isinstance(poobah, pd.DataFrame):
        plt.title('Log M versus U plot: Colors are the percent of probe failures per sample')
        if poobah.isna().sum().sum() > 0:
            if poobah.isna().equals(meth.isna()) and poobah.isna().equals(unmeth.isna()):
                pass # not a problem if the SAME probes are excluded in all dataframes
            else:
                LOGGER.warning("Your poobah_values.pkl file contains missing values; color coding will be inaccurate.")
        # percent of probes per sample with detection p-value > 0.05 (failed)
        percent_failures = round(100*( poobah[poobah > 0.05].count() / poobah.count() ),1)
        percent_failures = percent_failures.rename('probe_failure_(%)')
        # bin the failure percentages into 5%-wide buckets for the hue legend.
        # Series.where will replace the stuff that is False, so you have to negate it.
        percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)
        percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)
        percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)
        percent_failures_hues = percent_failures_hues.astype(int)
        #sizes = percent_failures_hues.copy()
        percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})
        legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']
        try:
            qc = pd.merge(left=medians,
                right=percent_failures_hues,
                left_on=medians.index,
                right_on=percent_failures_hues.index,
                how='inner')
        except:
            # edge case where meth/unmeth medians loses sample sentrix_ids, but poobah pkl retains them - proceed with merging assuming order is retained
            tempA = medians.reset_index(drop=True)
            tempB = percent_failures_hues.reset_index(drop=True)
            #qc = pd.merge(left=tempA,right=tempB,left_on=tempA.index,right_on=tempB.index,how='inner')
            qc = pd.concat([tempA, tempB], axis='columns') # pandas 1.3x needs this. Above .merge fails when inner-joining on range-indeces.
        hues_palette = sb.color_palette("twilight", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)
        this = sb.scatterplot(data=qc, x="mMed", y="uMed", hue="probe_failure_(%)",
            palette=hues_palette, hue_order=legend_order, legend="full") # size="size"
    else:
        # NOTE(review): unreachable -- the if/elif above already covers both cases
        raise NotImplementedError("poobah color coding is not implemented with 'compare' option")
    plt.xlim([min_x,max_x])
    plt.ylim([min_y,max_y])
    if cutoff_line:
        # dashed diagonal where (mMed + uMed)/2 == bad_sample_cutoff;
        # points below/left of it are flagged as bad samples
        x = np.linspace(6,14)
        y = -1*x+(2*bad_sample_cutoff)
        plt.plot(x, y, '--', lw=1, color='lightgrey', alpha=0.75, label='Cutoff')
    # legend
    legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
    legend.set_title("Probe failure rate (%)", prop={'size':'large'})
    # display plot
    if return_fig:
        return fig
    plt.show()
    plt.close('all')
    # print list of bad samples for user
    if len(bad_samples) > 0:
        print('List of Bad Samples')
        print([str(s) for s in bad_samples])
    return {
        'medians': medians,
        'cutoffs': cutoffs,
        'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],
        'bad_samples': [str(s) for s in bad_samples],
        'bad_sample_cutoff': bad_sample_cutoff,
    }
def _make_qc_df(meth,unmeth):
"""Function takes meth and unmeth dataframes,
returns a single dataframe with log2 medians for
m and u values"""
mmed = pd.DataFrame(np.log2(meth.median(axis=0)),columns=['mMed'])
umed = pd.DataFrame(np.log2(unmeth.median(axis=0)),columns=['uMed'])
qc = pd.merge(left=mmed,
right=umed,
left_on=mmed.index,
right_on=umed.index,
how='inner').set_index('key_0',drop=True)
#del qc.index.name
qc.index.name = None
return qc
def _get_data(data_containers=None, path=None, compare=False, noob=True, verbose=True):
    """Internal loader: returns (meth, unmeth) dataframes, or
    (meth, unmeth, noob_meth, noob_unmeth) when compare=True, or None/nothing
    when no usable data is found.
    Sources tried, in order: a data_containers list from methylprep, then
    pickled meth/unmeth files under path, then individual *_processed.csv
    files under path.
    """
    # NOTE: not a flexible function because it returns 0, 2, or 4 objects depending on inputs.
    # NOTE: this requires that data_containers label the index 'IlmnID' for each sample
    if data_containers:
        # Pull M and U values
        # (reads the name-mangled private dataframe off each SampleDataContainer)
        meth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)
        unmeth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)
        for i,c in enumerate(data_containers):
            sample = data_containers[i].sample
            m = c._SampleDataContainer__data_frame.rename(columns={'meth':sample})
            u = c._SampleDataContainer__data_frame.rename(columns={'unmeth':sample})
            meth = pd.merge(left=meth,right=m[sample],left_on='IlmnID',right_on='IlmnID',)
            unmeth = pd.merge(left=unmeth,right=u[sample],left_on='IlmnID',right_on='IlmnID')
    elif path:
        # filename prefix selects noob-corrected vs raw pickles
        n = 'noob_' if noob else ''
        # first try to load from disk
        if (noob and Path(path, f'{n}meth_values.pkl').exists() and
            Path(path, f'{n}unmeth_values.pkl').exists()):
            _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))
            return _meth, _unmeth
        # THIS DOES NOT warn user if they want noob and the files don't exist.
        elif Path(path, 'meth_values.pkl').exists() and Path(path,'unmeth_values.pkl').exists() and not compare:
            _meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))
            return _meth, _unmeth
        elif (compare and
            Path(path, 'meth_values.pkl').exists() and
            Path(path, 'unmeth_values.pkl').exists() and
            Path(path, f'{n}meth_values.pkl').exists() and
            Path(path, f'{n}unmeth_values.pkl').exists()):
            # compare mode needs BOTH raw and corrected pickles
            meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
            unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))
            _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))
            return meth, unmeth, _meth, _unmeth
        else:
            # fall back to scanning per-sample CSV exports
            # (tqdm comes from the .progress_bar wildcard import)
            sample_filenames = []
            csvs = []
            files_found = False
            for file in tqdm(Path(path).expanduser().rglob('*_processed.csv'), desc='Loading files', total=len(list(Path(path).expanduser().rglob('*_processed.csv')))):
                this = pd.read_csv(file)
                files_found = True
                if f'{n}meth' in this.columns and f'{n}unmeth' in this.columns:
                    csvs.append(this)
                    sample_filenames.append(str(file.stem).replace('_processed',''))
            # note, this doesn't give a clear error message if using compare and missing uncorrected data.
            if verbose and len(csvs) > 0:
                print(f"{len(csvs)} processed samples found.")
            if csvs != []:
                # seed wide meth/unmeth frames from the first CSV, keyed by IlmnID
                meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}meth']})
                unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}unmeth']})
                meth.set_index('IlmnID', inplace=True)
                unmeth.set_index('IlmnID', inplace=True)
                if compare:
                    # n2 is the opposite prefix of n: the other data version
                    n2 = '' if noob else 'noob_'
                    _meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}meth']})
                    _unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}unmeth']})
                    _meth.set_index('IlmnID', inplace=True)
                    _unmeth.set_index('IlmnID', inplace=True)
                for idx, sample in tqdm(enumerate(csvs[1:],1), desc='Samples', total=len(csvs)):
                    # columns are meth, unmeth OR noob_meth, noob_unmeth, AND IlmnID
                    meth = pd.merge(left=meth, right=sample[f'{n}meth'], left_on='IlmnID', right_on=sample['IlmnID'])
                    meth = meth.rename(columns={f'{n}meth': sample_filenames[idx]})
                    unmeth = pd.merge(left=unmeth, right=sample[f'{n}unmeth'], left_on='IlmnID', right_on=sample['IlmnID'])
                    unmeth = unmeth.rename(columns={f'{n}unmeth': sample_filenames[idx]})
                    if compare:
                        _meth = pd.merge(left=_meth, right=sample[f'{n2}meth'], left_on='IlmnID', right_on=sample['IlmnID'])
                        _meth = _meth.rename(columns={f'{n2}meth': sample_filenames[idx]})
                        _unmeth = pd.merge(left=_unmeth, right=sample[f'{n2}unmeth'], left_on='IlmnID', right_on=sample['IlmnID'])
                        _unmeth = _unmeth.rename(columns={f'{n2}unmeth': sample_filenames[idx]})
            else:
                if verbose:
                    print(f"{len(csvs)} processed samples found in {path} using NOOB: {noob}.")
                if files_found:
                    data_columns = "NOOB meth/unmeth" if noob else "non-NOOB-corrected meth/unmeth"
                    print(f"processed files found, but did not contain the right data ({data_columns})")
                return
    if compare:
        return meth, unmeth, _meth, _unmeth
    return meth, unmeth
def plot_M_vs_U(data_containers_or_path=None, meth=None, unmeth=None, poobah=None,
    noob=True, silent=False, verbose=False, plot=True, compare=False, return_fig=False, palette=None,
    cutoff_line=True):
    """plot methylated vs unmethylated probe intensities
    input (choose one of these):
    ============================
    PATH to csv files processed using methylprep
        these have "noob_meth" and "noob_unmeth" columns per sample file this function can use.
        if you want it to processed data uncorrected data.
        (If there is a poobah_values.pkl file in this PATH, it will use the file to color code points)
    data_containers = run_pipeline(data_dir = 'somepath',
        save_uncorrected=True,
        sample_sheet_filepath='samplesheet.csv')
        you can also recreate the list of datacontainers using methylcheck.load(<filepath>,'meth')
    (meth and unmeth)
        if you chose `process --all` you can load the raw intensities like this, and pass them in:
        meth = pd.read_pickle('meth_values.pkl')
        unmeth = pd.read_pickle('unmeth_values.pkl')
        THIS will run the fastest.
    poobah
        filepath: You may supply the file path to the p-value detection dataframe. If supplied, it will color
        code points on the plot.
        False: set poobah to False to suppress this coloring.
        None (default): if there is a poobah_values.pkl file in your path, it will use it.
    optional params:
    noob: use noob-corrected meth/unmeth values
    verbose: additional messages
    plot: if True (default), shows a plot. if False, this function returns the median values per sample of meth and unmeth probes.
    return_fig: (False default), if True (and plot is true), returns the figure object instead of showing it.
    compare:
        if the processed data contains both noob and uncorrected values, it will plot both in different colors
        the compare option will not work with using the 'meth' and 'unmeth' inputs, only with path or data_containers.
    cutoff_line: True will draw a diagonal line on plots.
        the cutoff line is based on the X-Y scale of the plot, which depends on the range of intensity values in your data set.
    TODO:
        doesn't return both types of data if using compare and not plotting
        doesn't give good error message for compare
    """
    # 1. work out whether the first positional arg is a path or an object
    try:
        if Path(data_containers_or_path).exists(): # if passing in a valid string, this should work.
            path = Path(data_containers_or_path)
        else:
            path = None
    except TypeError:
        path = None # fails if passing in a data_containers object
    if isinstance(data_containers_or_path, Path): #this only recognizes a Path object, not a string path
        path = data_containers_or_path
        data_containers = None
    elif isinstance(path, Path):
        data_containers = None
    else:
        path = None
        data_containers = data_containers_or_path # by process of exclusion, this must be an object, or None
    if isinstance(data_containers_or_path, pd.DataFrame):
        raise ValueError("M_vs_U cannot plot a dataframe of processed data; requires meth and unmeth values.")
    if not isinstance(path, Path) and isinstance(data_containers, type(None)) and not isinstance(meth, pd.DataFrame) and not isinstance(unmeth, pd.DataFrame):
        print("You must specify a path to methylprep processed data files, or provide a data_containers object as input, or pass in meth and unmeth dataframes.")
        # hasattr: user defined class instances should have __name__ and other objects should not
        return
    # 2. load meth + unmeth from path
    elif isinstance(meth,type(None)) and isinstance(unmeth,type(None)):
        try:
            if compare:
                meth, unmeth, _meth, _unmeth = _get_data(data_containers, path, compare=compare, noob=noob)
            else:
                meth, unmeth = _get_data(data_containers, path, compare=compare, noob=noob)
        except Exception as e:
            print(e)
            print("No processed data found.")
            return
    # 2. load poobah_df if exists
    if isinstance(poobah,bool) and poobah == False:
        poobah_df = None
    elif isinstance(poobah, pd.DataFrame):
        poobah_df = poobah
        poobah = True
    else:
        poobah_df = None
        if isinstance(path, Path) and 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:
            poobah_df = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])
            poobah=True
        else:
            if poobah_df is None: # didn't find a poobah file to load
                LOGGER.warning("Did not find a poobah_values.pkl file; unable to color-code plot.")
                poobah = False #user may have set this to True or None, but changing params to fit data.
    if verbose and not silent and isinstance(poobah_df,pd.DataFrame):
        LOGGER.info("Using poobah_values.pkl")
    #palette options to pass in: "CMRmap" "flare" "twilight" "Blues", "tab10"
    hues_palette = sb.color_palette("twilight", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)
    if poobah is not False and isinstance(poobah_df, pd.DataFrame) and not compare:
        if poobah_df.isna().sum().sum() > 0:
            if poobah_df.isna().equals(meth.isna()) and poobah_df.isna().equals(unmeth.isna()):
                pass # not a problem if the SAME probes are excluded in all dataframes
            else:
                LOGGER.warning("Your poobah_values.pkl file contains missing values; color coding will be inaccurate.")
        # percent of probes per sample with detection p-value > 0.05 (failed)
        percent_failures = round(100*( poobah_df[poobah_df > 0.05].count() / poobah_df.count() ),1)
        percent_failures = percent_failures.rename('probe_failure (%)')
        meth_med = meth.median()
        unmeth_med = unmeth.median()
        # bin the failure percentages into 5%-wide buckets for the hue legend.
        # Series.where will replace the stuff that is False, so you have to negate it.
        percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)
        percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)
        percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)
        percent_failures_hues = percent_failures_hues.astype(int)
        #sizes = percent_failures_hues.copy()
        percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})
        legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']
        df = pd.concat([
            meth_med.rename('meth'),
            unmeth_med.rename('unmeth'),
            percent_failures_hues],
            #sizes.rename('size')],
            axis=1)
    if plot:
        # plot it
        fig,ax = plt.subplots(figsize=(10,10))
        plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')
        if poobah and not compare:
            this = sb.scatterplot(data=df, x="meth", y="unmeth", hue="probe_failure (%)",
                palette=hues_palette, hue_order=legend_order, legend="full") # size="size"
            legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
            legend.set_title("Probe failure rate (%)", prop={'size':'large'})
        elif not poobah and not compare:
            this = sb.scatterplot(x=meth.median(),y=unmeth.median(),s=75)
        elif compare:
            data_df = pd.DataFrame(data={
                'meth': meth.median(),
                'unmeth': unmeth.median()
            })
            data_df["hue"] = "Raw intensity"
            data_df2 = pd.DataFrame(data={ # the NOOB version
                'meth': _meth.median(),
                'unmeth': _unmeth.median()
            })
            # each data set should have same samples in same order, so label_lookup will work for both hues
            # NOTE(review): i == 26 yields chr(91) '[' here; likely intended i <= 25 -- cosmetic labels only
            label_lookup = {index_val: chr(i+65) if i <= 26 else str(i-26) for i,index_val in enumerate(data_df.index)}
            data_df2['hue'] = "Corrected intensity"
            # DataFrame.append was removed in pandas 2.0; concat is the supported equivalent
            data_df = pd.concat([data_df, data_df2])
            del data_df2
            legend_order = ["Raw intensity", "Corrected intensity"]
            hues_palette = sb.color_palette("tab10", n_colors=2) if palette is None else sb.color_palette(palette, n_colors=2)
            this = sb.scatterplot(data=data_df, x='meth', y='unmeth', hue='hue', palette=hues_palette)
            # FINALLY, label ALL points so you can compare the shifts
            for index_val, row in data_df.iterrows():
                color_code = {"Raw intensity":"blue", "Corrected intensity": "darkorange"}
                #proxy_label = chr(i+65) if i <= 52 else str(i-65)
                proxy_label = label_lookup.get(index_val,"-1")
                plt.text(x=row["meth"]+7, y=row["unmeth"]+7, s=proxy_label,
                    fontdict={'color':color_code.get(row["hue"], "black"), 'size':8, 'family':'sans-serif'})
                #bbox=dict(facecolor=’yellow’,alpha=0.5))
        if poobah and not compare:
            plt.title('M versus U plot: Colors are the percent of probe failures per sample')
        elif compare:
            plt.title('M versus U plot: Showing effect of processing fluorescence intensities')
        else:
            plt.title('M versus U plot')
        plt.xlabel('Median Methylated Intensity', fontsize='large')
        plt.ylabel('Median Unmethylated Intensity', fontsize='large')
        # add diagonal line
        if cutoff_line:
            # corner-to-corner diagonal, drawn as 1000 tiny grey points
            line = {'y': this.axes.get_ylim(), 'x': this.axes.get_xlim()}
            sx = []
            sy = []
            for i in range(1000):
                sx.append(line['x'][0] + i/1000*(line['x'][1] - line['x'][0]))
                sy.append(line['y'][0] + i/1000*(line['y'][1] - line['y'][0]))
            this = sb.scatterplot(x=sx, y=sy, s=3, color=(0.8, 0.8, 0.8))
            if poobah:
                # This is necessary because legend title disappears when adding cutoff-line for some reason.
                legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
                legend.set_title("Probe failure rate (%)", prop={'size':'large'})
        if return_fig:
            return this.get_figure()
        plt.show()
        plt.close('all')
    else:
        return {'meth_median': meth.median(), 'unmeth_median': unmeth.median()}
def plot_beta_by_type(beta_df, probe_type='all', return_fig=False, silent=False, on_lambda=False):
    """compare betas for type I and II probes -- (inspired by the plotBetasByType() function)
    Plot the overall density distribution of beta values and the density distributions of the Infinium I or II probe types
    1 distribution plot; user defines type (I or II infinium)
    Doesn't work with 27k arrays because they are all of the same type, Infinium Type I.
    options:
        return_fig: (default False) if True, returns a list of figure objects instead of showing plots.
    """
    mouse_probe_types = ['cg','ch','uk']
    probe_types = ['I', 'II', 'IR', 'IG', 'all'] # 'SnpI', 'Control' are in manifest, but not in the processed data
    if probe_type not in probe_types + mouse_probe_types:
        raise ValueError(f"Please specify an Infinium probe_type: ({probe_types}) to plot or, if mouse array, one of these ({mouse_probe_types}) or 'all'.")
    # orient
    if beta_df.shape[1] > beta_df.shape[0]:
        beta_df = beta_df.transpose() # probes should be in rows.
    array_type, man_filepath = methylcheck.detect_array(beta_df, returns='filepath', on_lambda=on_lambda)
    # note that 'array_type' can look like string 'mouse' but only str(array_type) will match the string 'mouse'
    if Path.exists(man_filepath):
        try:
            from methylprep import Manifest, ArrayType
        except ImportError:
            raise ImportError("plot_betas_by_type() requires methylprep")
        # temporarily raise the log level: Manifest() logs noisy INFO messages
        LOGGER.setLevel(logging.WARNING)
        manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)
        LOGGER.setLevel(logging.INFO)
    else:
        raise FileNotFoundError("manifest file not found.")
    # merge reference col, filter probes, them remove ref col(s)
    orig_shape = beta_df.shape
    # II, I, IR, IG, Control
    mapper = manifest.data_frame.loc[:, ['probe_type','Color_Channel']]
    beta_df = beta_df.merge(mapper, right_index=True, left_index=True)
    figs = []
    if probe_type in ('I', 'all'):
        subset = beta_df[beta_df['probe_type'] == 'I']
        subset = subset.drop('probe_type', axis='columns')
        subset = subset.drop('Color_Channel', axis='columns')
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I probes', return_fig=True, silent=silent, full_range=True) )
        else:
            print(f'Found {subset.shape[0]} type I probes.')
            methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I probes', silent=silent, full_range=True)
    if probe_type in ('II', 'all'):
        subset = beta_df[beta_df['probe_type'] == 'II']
        subset = subset.drop('probe_type', axis='columns')
        subset = subset.drop('Color_Channel', axis='columns')
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type II probes', return_fig=True, silent=silent, full_range=True) )
        else:
            print(f'Found {subset.shape[0]} type II probes.')
            methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type II probes', silent=silent, full_range=True)
    if probe_type in ('IR', 'all'):
        subset = beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Red')]
        subset = subset.drop('probe_type', axis='columns')
        subset = subset.drop('Color_Channel', axis='columns')
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Red (IR) probes', return_fig=True, silent=silent, full_range=True) )
        else:
            print(f'Found {subset.shape[0]} type I Red (IR) probes.')
            methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Red (IR) probes', silent=silent, full_range=True)
    if probe_type in ('IG', 'all'):
        subset = beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Grn')]
        subset = subset.drop('probe_type', axis='columns')
        subset = subset.drop('Color_Channel', axis='columns')
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Green (IG) probes', return_fig=True, silent=silent, full_range=True) )
        else:
            print(f'Found {subset.shape[0]} type I Green (IG) probes.')
            methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Green (IG) probes', silent=silent, full_range=True)
    if str(array_type) != 'mouse':
        if return_fig:
            return figs
        return
    ############ MOUSE ONLY ################
    # TODO: control probe types #
    # 'probe_type' are I, II, IR, IG and probe_type (mouse only) are 'cg','ch','uk'. | 'rs' are in controls
    # mouse_probe_types are 'ch','cg','rs','uk'
    # mouse probe class is taken from the first two characters of the IlmnID
    mapper = pd.DataFrame(data=manifest.data_frame.index.str[:2], index=manifest.data_frame.index)
    mapper = mapper.rename(columns={'IlmnID':'mouse_probe_type'})
    beta_df = beta_df.merge(mapper, right_index=True, left_index=True)
    if probe_type in mouse_probe_types:
        subset = beta_df[beta_df['mouse_probe_type'] == probe_type]
        subset = subset.drop(columns=['probe_type','Color_Channel','mouse_probe_type'])
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {probe_type} probes', return_fig=True, silent=silent, full_range=True) )
        else:
            methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {probe_type} probes', silent=silent, full_range=True)
    if probe_type == 'all':
        for mouse_probe_type in mouse_probe_types:
            subset = beta_df[beta_df['mouse_probe_type'] == mouse_probe_type]
            subset = subset.drop(columns=['probe_type','Color_Channel','mouse_probe_type'])
            if subset.shape[0] == 0:
                if not silent:
                    # BUGFIX: was a plain string with literal '{mouse_probe_type}'; now an f-string
                    LOGGER.warning(f"No {mouse_probe_type} probes found")
            if return_fig:
                figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {mouse_probe_type} probes', return_fig=True, silent=silent, full_range=True) )
            else:
                methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {mouse_probe_type} probes', silent=silent, full_range=True)
    if return_fig:
        return figs
    plt.show()
    plt.close('all')
def plot_controls(path=None, subset='all', return_fig=False):
    """Plot internal array QC control probe intensities (red + green channels).

    Control probe data is available when methylprep is run with the
    `--save_control` or `--all` process option.

    input:
    ======
        path
            can either be a path to the file, or a path to the folder containing a file called
            'control_probes.pkl', or it can be the dictionary of control dataframes in
            `control_probes.pkl`.

    options:
    ========
        subset ('staining' | 'negative' | 'hybridization' | 'extension' | 'bisulfite' |
            'non-polymorphic' | 'target-removal' | 'specificity' | 'all'):
            'all' will plot every control function (default)

        return_fig (False)
            if True, returns a list of matplotlib.pyplot figure objects INSTEAD of showing them.
            Used in QC ReportPDF.

        if there are more than 30 samples, plots will not have sample names on x-axis.
    """
    subset_options = {'staining', 'negative', 'hybridization', 'extension', 'bisulfite', 'non-polymorphic', 'target-removal', 'specificity', 'all'}
    if subset not in subset_options:
        raise ValueError(f"Choose one of these options for plot type: {subset_options}")
    if not path:
        print("You must specify a path to the control probes processed data file or folder (available with the `--save_control` methylprep process option).")
        return
    try:
        # accept a pre-loaded dict of {sample: DataFrame} (the control_probes.pkl structure) directly
        if type(path) is dict and all([type(df) is type(pd.DataFrame()) for df in path.values()]):
            control = path
            path = None
        else:
            path = Path(path)
            if path.is_dir():
                control = pd.read_pickle(Path(path, 'control_probes.pkl'))
            elif path.is_file():
                control = pd.read_pickle(path) # allows for any arbitrary filename to be used, so long as structure is same, and it is a pickle.
    except Exception as e: # e.g. cannot unpack NoneType
        print(e)
        print("No data.")
        return
    # mouse arrays carry 473 control probes; EPIC carries 694 — probe count identifies the array
    mouse = True if list(control.values())[0].shape[0] == 473 else False # vs 694 controls for epic.
    # with more than 30 samples, per-sample x-axis labels become unreadable, so suppress them
    plotx = 'show' if len(list(control.keys())) <= 30 else None
    # seed red/green frames with the probe metadata columns from the first sample
    control_R = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])
    control_G = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])
    # fold each per-sample DataFrame into one wide DF per channel (one column per sample)
    for sample,c in control.items():
        # drop SNPs from control DF; they have NaN in the Control_Type column
        c = c[c['Control_Type'].notna() == True]
        df_red = c[['Extended_Type','Mean_Value_Red']].rename(columns={'Mean_Value_Red':sample})
        df_green = c[['Extended_Type','Mean_Value_Green']].rename(columns={'Mean_Value_Green':sample})
        control_R = pd.merge(left=control_R,right=df_red,on=['Extended_Type'])
        control_G = pd.merge(left=control_G,right=df_green,on=['Extended_Type'])
    figs = []
    # ---- each branch below repeats the same recipe: filter by Control_Type, build a
    # ---- probe->color map, pivot so samples are rows, then hand off to _qc_plotter()
    if subset in ('staining','all'):
        stain_red = control_R[control_R['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        stain_green = control_G[control_G['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(stain_green.Extended_Type, stain_green.Color))
        # '-99' is a placeholder for a missing color in the manifest; substitute a plottable color
        color_dict.update({k: (v if v != '-99' else 'gold') for k,v in color_dict.items()})
        stain_green = stain_green.drop(columns=['Color']).set_index('Extended_Type')
        stain_red = stain_red.drop(columns=['Color']).set_index('Extended_Type')
        stain_red = stain_red.T
        stain_green = stain_green.T
        if stain_red.shape[1] == 0 or stain_green.shape[1] == 0:
            LOGGER.info("No staining probes found")
        else:
            fig = _qc_plotter(stain_red, stain_green, color_dict, xticks=plotx, ymax=60000, title='Staining', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('negative','all'):
        if mouse:
            # mouse manifest defines control probes in TWO columns, so also match on the 'neg_' name prefix
            neg_red = control_R[(control_R['Control_Type'] == 'NEGATIVE') & (control_R['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_green = control_G[(control_G['Control_Type'] == 'NEGATIVE') & (control_G['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_mouse_probe_names = list(neg_red.Extended_Type.values)
        else:
            neg_red = control_R[control_R['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_green = control_G[control_G['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(neg_green.Extended_Type, neg_green.Color))
        color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})
        neg_green = neg_green.drop(columns=['Color']).set_index('Extended_Type')
        neg_red = neg_red.drop(columns=['Color']).set_index('Extended_Type')
        neg_red = neg_red.T
        neg_green = neg_green.T
        # note: GenomeStudio appears to only do the first 16 negative control probes.
        # There is a total of 600, which is too many to plot at once.
        # (this first list is retained for reference only; it is overwritten just below)
        list_of_negative_controls_to_plot = ['Negative 1','Negative 2','Negative 3','Negative 4','Negative 5',
            'Negative 6','Negative 7','Negative 8','Negative 9','Negative 10',
            'Negative 11','Negative 12','Negative 13','Negative 14','Negative 15',
            'Negative 16']
        # UPDATE: picking a smattering of probes that are present in both EPIC and EPIC+
        list_of_negative_controls_to_plot = ['Negative 1','Negative 142','Negative 3','Negative 4','Negative 5',
            'Negative 6','Negative 7','Negative 8','Negative 119','Negative 10',
            'Negative 484','Negative 12','Negative 13','Negative 144','Negative 151',
            'Negative 166']
        probes_to_plot = list_of_negative_controls_to_plot
        if mouse:
            probes_to_plot = neg_mouse_probe_names[:36] # plot the first 36
        # scale the y-axis to the actual data range (+10% headroom), using only probes present in both channels
        dynamic_controls = [c for c in probes_to_plot if c in neg_red.columns and c in neg_green.columns]
        dynamic_ymax = max([max(neg_red[dynamic_controls].max(axis=0)), max(neg_green[dynamic_controls].max(axis=0))])
        dynamic_ymax = dynamic_ymax + int(0.1*dynamic_ymax)
        fig = _qc_plotter(neg_red, neg_green, color_dict, columns=probes_to_plot, ymax=dynamic_ymax, xticks=plotx, title='Negative', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('hybridization','all'):
        hyb_red = control_R[control_R['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        hyb_green = control_G[control_G['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(hyb_green.Extended_Type, hyb_green.Color))
        hyb_green = hyb_green.drop(columns=['Color']).set_index('Extended_Type')
        hyb_red = hyb_red.drop(columns=['Color']).set_index('Extended_Type')
        hyb_red = hyb_red.T
        hyb_green = hyb_green.T
        fig = _qc_plotter(hyb_red, hyb_green, color_dict, ymax=35000, xticks=plotx, title='Hybridization', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('extension','all'):
        ext_red = control_R[control_R['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        ext_green = control_G[control_G['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(ext_green.Extended_Type, ext_green.Color))
        ext_green = ext_green.drop(columns=['Color']).set_index('Extended_Type')
        ext_red = ext_red.drop(columns=['Color']).set_index('Extended_Type')
        ext_red = ext_red.T
        ext_green = ext_green.T
        if ext_red.shape[1] == 0 or ext_green.shape[1] == 0:
            LOGGER.info("No extension probes found")
        else:
            fig = _qc_plotter(ext_red, ext_green, color_dict, ymax=50000, xticks=plotx, title='Extension', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('bisulfite','all'):
        # bisulfite conversion has both type I and type II control probes; plot them together
        bci_red = control_R[control_R['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        bci_green = control_G[control_G['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(bci_green.Extended_Type, bci_green.Color))
        color_dict.update({k: (v if v != 'Both' else 'seagreen') for k,v in color_dict.items()}) # mouse has 'Both'; other arrays don't
        bci_green = bci_green.drop(columns=['Color']).set_index('Extended_Type')
        bci_red = bci_red.drop(columns=['Color']).set_index('Extended_Type')
        bci_red = bci_red.T
        bci_green = bci_green.T
        fig = _qc_plotter(bci_red, bci_green, color_dict, ymax=30000, xticks=plotx, title='Bisulfite Conversion', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('non-polymorphic','all'):
        np_red = control_R[control_R['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        np_green = control_G[control_G['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(np_green.Extended_Type, np_green.Color))
        color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})
        np_green = np_green.drop(columns=['Color']).set_index('Extended_Type')
        np_red = np_red.drop(columns=['Color']).set_index('Extended_Type')
        np_red = np_red.T
        np_green = np_green.T
        if np_red.shape[1] == 0 or np_green.shape[1] == 0:
            LOGGER.info("No non-polymorphic probes found")
        else:
            fig = _qc_plotter(np_red, np_green, color_dict, ymax=30000, xticks=plotx, title='Non-polymorphic', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('target-removal','all'):
        tar_red = control_R[control_R['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        tar_green = control_G[control_G['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(tar_green.Extended_Type, tar_green.Color))
        tar_green = tar_green.drop(columns=['Color']).set_index('Extended_Type')
        tar_red = tar_red.drop(columns=['Color']).set_index('Extended_Type')
        tar_red = tar_red.T
        tar_green = tar_green.T
        if tar_red.shape[1] == 0 or tar_green.shape[1] == 0:
            LOGGER.info("No target-removal probes found")
        else:
            fig = _qc_plotter(tar_red, tar_green, color_dict, ymax=2000, xticks=plotx, title='Target Removal', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('specificity','all'):
        spec_red = control_R[control_R['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        spec_green = control_G[control_G['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(spec_green.Extended_Type, spec_green.Color))
        spec_green = spec_green.drop(columns=['Color']).set_index('Extended_Type')
        spec_red = spec_red.drop(columns=['Color']).set_index('Extended_Type')
        spec_red = spec_red.T
        spec_green = spec_green.T
        fig = _qc_plotter(spec_red, spec_green, color_dict, ymax=30000, xticks=plotx, title='Specificity (Type I)', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if return_fig and figs != []:
        return figs
    plt.show()
    plt.close('all')
def _qc_plotter(stain_red, stain_green, color_dict=None, columns=None, ymax=None, xticks='show',
                title='', return_fig=False):
    """Generic two-panel (Green / Red) scatter plotter for control-probe intensity QC.

    Used by plot_controls() for every control subset.

    options:
    ========
        required: stain_red and stain_green
            samples in rows, control probes (Extended_Type) in columns — i.e. already
            transposed from the control_probes.pkl orientation.
        color_dict
            {probe_name: matplotlib color} map used to color each probe's points.
            Probes missing from the dict fall back to 'Black' (same fallback used
            for '-99' placeholder colors).
        ymax
            if defined, constrains the plot y-max. Standardizes the view of each probe
            type within normal ranges; any probe values outside the range are warned about.
        columns
            list of columns (probes) in stain_red/stain_green to plot (if omitted,
            everything is plotted). Probes missing from either channel are dropped
            with a warning.
        xticks ('show' | None)
            pass None to hide per-sample x-axis labels (caller does this when N > 30,
            since the labels would be unreadable).
        return_fig (False)
            if True, returns the figure object instead of showing the plot.

    todo:
    =====
        add a batch option that splits large datasets into multiple charts, so labels
        are readable on the x-axis.
    """
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 8))  # was (12,10)
    plt.tight_layout(w_pad=15)
    plt.setp(ax1.xaxis.get_majorticklabels(), rotation=90, fontsize='small')
    plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90, fontsize='small')
    ax1.grid(axis='both', linestyle='dotted')
    ax2.grid(axis='both', linestyle='dotted')
    title = title + ' ' if title != '' else title
    ax1.set_title(f'{title}Green')
    ax2.set_title(f'{title}Red')
    if color_dict is None:
        color_dict = {}
    # control probes can contain '-99' placeholders in the Color column, which breaks
    # matplotlib; plot_controls() normally resolves these, but guard here too.
    if '-99' in color_dict.values():
        missing_colors = {k: v for k, v in color_dict.items() if v == '-99'}
        LOGGER.warning(f"{title} has invalid colors: {missing_colors}")
        color_dict.update({k: 'Black' for k in missing_colors})
    if columns is not None:  # identity test; `!= None` was unidiomatic
        # ensure every requested column exists in both channels before slicing
        # (previously failed with Barnes idats_part3, which was missing some probes)
        if (set(columns) - set(stain_red.columns) != set() or
            set(columns) - set(stain_green.columns) != set()):
            cols_removed = [c for c in columns if c not in stain_red or c not in stain_green]
            columns = [c for c in columns if c in stain_red and c in stain_green]
            LOGGER.warning(f'These probes were expected but missing from the {title}data: ({", ".join(cols_removed)})')
        stain_red = stain_red.loc[:, columns]
        stain_green = stain_green.loc[:, columns]
    for c in stain_red.columns:
        if ymax is not None and (stain_red[c] > ymax).any():
            LOGGER.warning(f'Some Red {c} values exceed chart maximum and are not shown.')
        if ymax is not None and (stain_green[c] > ymax).any():
            LOGGER.warning(f'Some Green {c} values exceed chart maximum and are not shown.')
        # .get() guards against probes absent from color_dict (e.g. when the default
        # empty dict is used), which previously raised KeyError; 'Black' matches the
        # '-99' fallback above.
        probe_color = color_dict.get(c, 'Black')
        ax1.plot(stain_green.index,
                 c,
                 data=stain_green, label=c,
                 color=probe_color, linewidth=0, marker='o')
        ax2.plot(stain_red.index,
                 c,
                 data=stain_red, label=c,
                 color=probe_color, linewidth=0, marker='o')
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')
    ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')
    if ymax is not None:
        ax1.set_ylim([0, ymax])
        ax2.set_ylim([0, ymax])
    if xticks != 'show':
        # hide unreadable per-sample labels when there are too many samples
        ax1.get_xaxis().set_visible(False)
        ax2.get_xaxis().set_visible(False)
    if return_fig:
        return fig
    plt.show()
    plt.close('all')
def bis_conversion_control(path_or_df, use_median=False, on_lambda=False, verbose=False):
    """Calculate the GCT (bisulfite conversion completeness) score for each sample.

    input:
    ======
        path_or_df
            path to a folder containing meth_values.pkl or noob_meth_values.pkl,
            or a methylated-intensity dataframe (probes in rows, samples in columns).
        use_median
            not supported yet; the mean of probe values is always used.
        on_lambda
            passed through to manifest detection/loading (AWS lambda file paths).
        verbose
            if True, logs each sample's extC/extT means and GCT score.

    returns:
    ========
        dict of {sample_id: GCT score}, where GCT = 100 * mean(extC) / mean(extT)
        computed over out-of-band (type I Red) probes, NaNs excluded.
        Returns an empty dict if the input path cannot be read.
    """
    found_meth = False
    try:
        if isinstance(path_or_df, pd.DataFrame):
            meth = path_or_df
            found_meth = True
        else:
            path = Path(path_or_df)
            # prefer raw meth values; fall back to NOOB-corrected values
            if path.is_dir() and Path(path, 'meth_values.pkl').is_file():
                meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
                found_meth = True
            if path.is_dir() and Path(path, 'noob_meth_values.pkl').is_file() and not found_meth:
                meth = pd.read_pickle(Path(path, 'noob_meth_values.pkl'))
                found_meth = True
    except Exception as e:  # e.g. cannot unpack NoneType
        print(e)
        print("No data.")
        return {}
    if not found_meth:
        raise FileNotFoundError("this requires methylated intensities in a pickle file.")
    # the number of probes in the meth df determines the array type
    array_type, man_filepath = methylcheck.detect_array(meth, returns='filepath', on_lambda=on_lambda)
    try:
        from methylprep import Manifest, ArrayType
    except ImportError:
        raise ImportError("this function requires methylprep")
    # temporarily raise the log level so manifest loading doesn't spam INFO messages
    if Path.exists(man_filepath):
        LOGGER.setLevel(logging.WARNING)
        manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)
        LOGGER.setLevel(logging.INFO)
    else:
        # initialize and force a manifest download with filepath=None
        LOGGER.setLevel(logging.WARNING)
        manifest = Manifest(ArrayType(array_type), filepath_or_buffer=None, on_lambda=on_lambda)
        LOGGER.setLevel(logging.INFO)
    # out-of-band Green = meth channel of type I Red probes; ~89203 probes on EPIC
    oobG_mask = set(manifest.data_frame[(manifest.data_frame['Infinium_Design_Type'] == 'I') & (manifest.data_frame['Color_Channel'] == 'Red')].index)
    if str(array_type) == 'epic+':
        array_type = 'epic'  # for the data-file match below
        # 'epic' should suffice for this test, except that epic+ probe names carry
        # extra '_...' suffixes, so strip them from both the mask and the meth index
        oobG_mask = set([probe.split('_')[0] for probe in oobG_mask])
        meth = meth.rename(index=lambda x: x.split('_')[0])
    try:
        from importlib import resources  # py3.7+
    except ImportError:
        import pkg_resources
    pkg_namespace = 'methylcheck.data_files'
    try:
        with resources.path(pkg_namespace, f'{array_type}_extC.csv') as probe_filepath:
            ext_C_probes = pd.read_csv(probe_filepath)
            ext_C_probes = ext_C_probes['x'].values  # simple, flat list of probe cgXXX names
        with resources.path(pkg_namespace, f'{array_type}_extT.csv') as probe_filepath:
            ext_T_probes = pd.read_csv(probe_filepath)
            ext_T_probes = ext_T_probes['x'].values
    except Exception:
        # was a bare `except:`; catching Exception still covers the NameError raised when
        # `resources` never imported (py3.6), without swallowing KeyboardInterrupt/SystemExit.
        # re-import pkg_resources here because this fallback can also trigger when
        # importlib.resources imported fine but resources.path() failed — in that case
        # pkg_resources was never imported above and would raise NameError.
        import pkg_resources
        probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extC.csv')
        ext_C_probes = pd.read_csv(probe_filepath)
        ext_C_probes = ext_C_probes['x'].values  # simple, flat list of probe cgXXX names
        probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extT.csv')
        ext_T_probes = pd.read_csv(probe_filepath)
        ext_T_probes = ext_T_probes['x'].values
    # restrict the extC/extT probe sets to out-of-band (type I Red) probes on this array
    ext_C = set(ext_C_probes).intersection(oobG_mask)
    ext_T = set(ext_T_probes).intersection(oobG_mask)
    # GCT: mean(C) / mean(T), after removing NaNs
    # TEST bis_conversion_control('/Volumes/LEGX/GSE69852/idats_2021_04_12')
    table = {}  # keys are sentrix_ids; values are GCT scores
    for sample in meth.columns:
        C_mask = meth[sample].index.isin(ext_C)
        C_mean = meth[sample].loc[C_mask].mean()  # excludes NaN by default
        T_mask = meth[sample].index.isin(ext_T)
        T_mean = meth[sample].loc[T_mask].mean()
        if verbose:
            LOGGER.info(f"{sample}: ({int(round(C_mean))} / {int(round(T_mean))}) = GCT {round(100*C_mean/T_mean, 1)}")
        table[sample] = round(100*C_mean/T_mean, 1)
    return table
import warnings
from pathlib import Path
import logging
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import methylcheck
from .progress_bar import *
LOGGER = logging.getLogger(__name__)
__all__ = ['run_qc', 'plot_beta_by_type', 'qc_signal_intensity', 'plot_M_vs_U', 'plot_controls', 'bis_conversion_control']
def run_qc(path):
    """Run the standard methylcheck QC plot suite on a folder of methylprep output.

    Loads beta_values.pkl, control_probes.pkl, (noob_)meth/unmeth_values.pkl, and
    poobah_values.pkl (if present) from `path`, then displays four QC plots:
    M-vs-U intensity, signal-intensity cutoff, control probes, and beta-by-probe-type.

    Raises FileNotFoundError if the path is invalid, not a directory, or the required
    pickles are missing (they require the `methylprep process --all` option).
    """
    try:
        beta_df = pd.read_pickle(Path(path,'beta_values.pkl').expanduser())
        controls = pd.read_pickle(Path(path,'control_probes.pkl').expanduser())
        # prefer raw meth/unmeth intensities when saved; otherwise use NOOB-corrected values
        if Path(path,'meth_values.pkl').expanduser().exists() and Path(path,'unmeth_values.pkl').expanduser().exists():
            meth_df = pd.read_pickle(Path(path,'meth_values.pkl').expanduser())
            unmeth_df = pd.read_pickle(Path(path,'unmeth_values.pkl').expanduser())
        else:
            meth_df = pd.read_pickle(Path(path,'noob_meth_values.pkl').expanduser())
            unmeth_df = pd.read_pickle(Path(path,'noob_unmeth_values.pkl').expanduser())
        # poobah (detection p-values) is optional; when present it color-codes the plots
        if Path(path,'poobah_values.pkl').expanduser().exists():
            poobah = pd.read_pickle(Path(path,'poobah_values.pkl').expanduser())
        else:
            poobah = None
    except FileNotFoundError:
        # distinguish a bad path from a valid-but-incomplete processed folder
        if not Path(path).exists():
            raise FileNotFoundError("Invalid path")
        elif not Path(path).is_dir():
            raise FileNotFoundError("Path is not a directory.")
        raise FileNotFoundError("Files missing. run_qc() only works if you used `methylprep process --all` option to produce beta_values, control_probes, meth_values, and unmeth_values files.")
    plot_M_vs_U(meth=meth_df, unmeth=unmeth_df, poobah=poobah)
    qc_signal_intensity(meth=meth_df, unmeth=unmeth_df, poobah=poobah)
    plot_controls(controls, 'all')
    plot_beta_by_type(beta_df, 'all')
def qc_signal_intensity(data_containers=None, path=None, meth=None, unmeth=None, poobah=None, palette=None,
    noob=True, silent=False, verbose=False, plot=True, cutoff_line=True, bad_sample_cutoff=11.5, return_fig=False):
    """Plot per-sample log2 median meth vs unmeth intensity and flag low-intensity samples.

    Supply exactly one data source: a `path` to methylprep output, a list of
    `data_containers`, or `meth`/`unmeth` dataframes (probes in rows, samples in columns).

    options:
    ========
        poobah: a poobah_values dataframe, or True/None to auto-load from `path`;
            when available, points are color-coded by per-sample probe failure rate.
        palette: optional seaborn palette name for the failure-rate hues.
        noob: load NOOB-corrected intensities when reading from `path`.
        plot: if False, skip plotting and just return the QC dict.
        cutoff_line: draw the diagonal bad-sample cutoff line.
        bad_sample_cutoff: samples whose mean of log2 medians falls below this are "bad".
        return_fig: if True, return the figure instead of showing it.

    returns:
    ========
        dict with 'medians', 'cutoffs', 'good_samples', 'bad_samples', and
        'bad_sample_cutoff' (or the figure, when return_fig=True).
    """
    if not path and not data_containers and type(meth) is type(None) and type(unmeth) is type(None):
        print("ERROR: You must specify a path to methylprep processed data files or provide a data_containers object as input.")
        return
    if not isinstance(data_containers,list) and isinstance(data_containers, (str,Path)):
        print("ERROR: If you want to supply a path to your processed files, use 'path=<path>'.")
        return
    if isinstance(path, str):
        path = Path(path)
    if isinstance(meth, type(None)) and isinstance(unmeth, type(None)):
        meth, unmeth = _get_data(data_containers=data_containers, path=path, compare=False, noob=noob, verbose=verbose)
    # auto-load poobah from disk when a path was given and poobah is a truthy non-DataFrame flag
    if (path is not None and not isinstance(poobah, pd.DataFrame)
        and not isinstance(poobah, type(None))):
        if poobah in (False,None):
            pass
        else:
            if 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:
                poobah = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])
            else:
                if verbose and not silent:
                    LOGGER.info("Cannot load poobah_values.pkl file.")
    # per-sample log2 median intensities; a sample is "bad" when the mean of its
    # meth/unmeth medians falls below bad_sample_cutoff
    medians = _make_qc_df(meth,unmeth)
    cutoffs = (medians.mMed.values + medians.uMed.values)/2
    bad_samples = medians.index[cutoffs < bad_sample_cutoff]
    min_x = int(min(medians.mMed))
    max_x = max(medians.mMed) + 1
    min_y = int(min(medians.uMed))
    max_y = max(medians.uMed) + 1
    if not plot:
        return {
            'medians': medians,
            'cutoffs': cutoffs,
            'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],
            'bad_samples': [str(s) for s in bad_samples],
            'bad_sample_cutoff': bad_sample_cutoff,
        }
    fig,ax = plt.subplots(figsize=(10,10))
    plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')
    plt.xlabel('Meth Median Intensity (log2)', fontsize='large')
    plt.ylabel('Unmeth Median Intensity (log2)', fontsize='large')
    if not isinstance(poobah, pd.DataFrame):
        # simple two-color plot: bad samples red, good samples black
        plt.title('Log M versus U plot')
        plt.scatter(x='mMed',y='uMed',data=medians[medians.index.isin(bad_samples)],label='Bad Samples',c='red')
        plt.scatter(x='mMed',y='uMed',data=medians[~medians.index.isin(bad_samples)],label="Good Samples",c='black')
    elif isinstance(poobah, pd.DataFrame):
        # color-code each sample by its percent of failed probes (poobah p > 0.05)
        plt.title('Log M versus U plot: Colors are the percent of probe failures per sample')
        if poobah.isna().sum().sum() > 0:
            # NaNs are harmless only if the SAME probes are missing in all three frames
            if poobah.isna().equals(meth.isna()) and poobah.isna().equals(unmeth.isna()):
                pass
            else:
                LOGGER.warning("Your poobah_values.pkl file contains missing values; color coding will be inaccurate.")
        percent_failures = round(100*( poobah[poobah > 0.05].count() / poobah.count() ),1)
        percent_failures = percent_failures.rename('probe_failure_(%)')
        # bin failure rates into 7 hue buckets; Series.where replaces where the
        # condition is False, hence the negations
        percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)
        percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)
        percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)
        percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)
        percent_failures_hues = percent_failures_hues.astype(int)
        percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})
        legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']
        try:
            qc = pd.merge(left=medians,
                right=percent_failures_hues,
                left_on=medians.index,
                right_on=percent_failures_hues.index,
                how='inner')
        except:
            # fallback when index-keyed merge fails: align positionally instead
            tempA = medians.reset_index(drop=True)
            tempB = percent_failures_hues.reset_index(drop=True)
            qc = pd.concat([tempA, tempB], axis='columns')
        hues_palette = sb.color_palette("twilight", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)
        this = sb.scatterplot(data=qc, x="mMed", y="uMed", hue="probe_failure_(%)",
            palette=hues_palette, hue_order=legend_order, legend="full")
    else:
        raise NotImplementedError("poobah color coding is not implemented with 'compare' option")
    plt.xlim([min_x,max_x])
    plt.ylim([min_y,max_y])
    if cutoff_line:
        # diagonal line where (mMed + uMed)/2 == bad_sample_cutoff
        x = np.linspace(6,14)
        y = -1*x+(2*bad_sample_cutoff)
        plt.plot(x, y, '--', lw=1, color='lightgrey', alpha=0.75, label='Cutoff')
    legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
    legend.set_title("Probe failure rate (%)", prop={'size':'large'})
    if return_fig:
        return fig
    plt.show()
    plt.close('all')
    if len(bad_samples) > 0:
        print('List of Bad Samples')
        print([str(s) for s in bad_samples])
    return {
        'medians': medians,
        'cutoffs': cutoffs,
        'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],
        'bad_samples': [str(s) for s in bad_samples],
        'bad_sample_cutoff': bad_sample_cutoff,
    }
def _make_qc_df(meth,unmeth):
mmed = pd.DataFrame(np.log2(meth.median(axis=0)),columns=['mMed'])
umed = pd.DataFrame(np.log2(unmeth.median(axis=0)),columns=['uMed'])
qc = pd.merge(left=mmed,
right=umed,
left_on=mmed.index,
right_on=umed.index,
how='inner').set_index('key_0',drop=True)
qc.index.name = None
return qc
def _get_data(data_containers=None, path=None, compare=False, noob=True, verbose=True):
    """Load meth/unmeth intensity dataframes from data_containers, pickles, or CSVs.

    Sources, in order of preference:
      - `data_containers`: a list of methylprep SampleDataContainer objects;
      - `path`: a folder with (noob_)meth/unmeth_values.pkl pickles, or with
        per-sample `*_processed.csv` files as a fallback.

    With compare=True, returns (raw_meth, raw_unmeth, corrected_meth, corrected_unmeth);
    otherwise returns (meth, unmeth). Returns None when processed CSVs exist but lack
    the expected columns. `noob` selects NOOB-corrected vs raw columns/pickles.
    """
    if data_containers:
        # accumulate one column per sample, joined on probe IlmnID
        meth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)
        unmeth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)
        for i,c in enumerate(data_containers):
            sample = data_containers[i].sample
            m = c._SampleDataContainer__data_frame.rename(columns={'meth':sample})
            u = c._SampleDataContainer__data_frame.rename(columns={'unmeth':sample})
            meth = pd.merge(left=meth,right=m[sample],left_on='IlmnID',right_on='IlmnID',)
            unmeth = pd.merge(left=unmeth,right=u[sample],left_on='IlmnID',right_on='IlmnID')
    elif path:
        # `n` is the filename/column prefix for NOOB-corrected data
        n = 'noob_' if noob else ''
        if (noob and Path(path, f'{n}meth_values.pkl').exists() and
            Path(path, f'{n}unmeth_values.pkl').exists()):
            _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))
            return _meth, _unmeth
        elif Path(path, 'meth_values.pkl').exists() and Path(path,'unmeth_values.pkl').exists() and not compare:
            _meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))
            return _meth, _unmeth
        elif (compare and
            Path(path, 'meth_values.pkl').exists() and
            Path(path, 'unmeth_values.pkl').exists() and
            Path(path, f'{n}meth_values.pkl').exists() and
            Path(path, f'{n}unmeth_values.pkl').exists()):
            # compare mode needs BOTH raw and corrected pickles
            meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
            unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))
            _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))
            _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))
            return meth, unmeth, _meth, _unmeth
        else:
            # no pickles found: fall back to reading per-sample *_processed.csv files
            sample_filenames = []
            csvs = []
            files_found = False
            for file in tqdm(Path(path).expanduser().rglob('*_processed.csv'), desc='Loading files', total=len(list(Path(path).expanduser().rglob('*_processed.csv')))):
                this = pd.read_csv(file)
                files_found = True
                # only keep CSVs that contain the expected intensity columns
                if f'{n}meth' in this.columns and f'{n}unmeth' in this.columns:
                    csvs.append(this)
                    sample_filenames.append(str(file.stem).replace('_processed',''))
            # note, this doesn't give a clear error message if using compare and missing uncorrected data.
            if verbose and len(csvs) > 0:
                print(f"{len(csvs)} processed samples found.")
            if csvs != []:
                # seed the frames with the first sample, then merge the rest on IlmnID
                meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}meth']})
                unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}unmeth']})
                meth.set_index('IlmnID', inplace=True)
                unmeth.set_index('IlmnID', inplace=True)
                if compare:
                    # n2 is the OTHER prefix (corrected vs raw) for the comparison frames
                    n2 = '' if noob else 'noob_'
                    _meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}meth']})
                    _unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}unmeth']})
                    _meth.set_index('IlmnID', inplace=True)
                    _unmeth.set_index('IlmnID', inplace=True)
                for idx, sample in tqdm(enumerate(csvs[1:],1), desc='Samples', total=len(csvs)):
                    meth = pd.merge(left=meth, right=sample[f'{n}meth'], left_on='IlmnID', right_on=sample['IlmnID'])
                    meth = meth.rename(columns={f'{n}meth': sample_filenames[idx]})
                    unmeth = pd.merge(left=unmeth, right=sample[f'{n}unmeth'], left_on='IlmnID', right_on=sample['IlmnID'])
                    unmeth = unmeth.rename(columns={f'{n}unmeth': sample_filenames[idx]})
                    if compare:
                        _meth = pd.merge(left=_meth, right=sample[f'{n2}meth'], left_on='IlmnID', right_on=sample['IlmnID'])
                        _meth = _meth.rename(columns={f'{n2}meth': sample_filenames[idx]})
                        _unmeth = pd.merge(left=_unmeth, right=sample[f'{n2}unmeth'], left_on='IlmnID', right_on=sample['IlmnID'])
                        _unmeth = _unmeth.rename(columns={f'{n2}unmeth': sample_filenames[idx]})
            else:
                if verbose:
                    print(f"{len(csvs)} processed samples found in {path} using NOOB: {noob}.")
                if files_found:
                    data_columns = "NOOB meth/unmeth" if noob else "non-NOOB-corrected meth/unmeth"
                    print(f"processed files found, but did not contain the right data ({data_columns})")
                return
    if compare:
        return meth, unmeth, _meth, _unmeth
    return meth, unmeth
def plot_M_vs_U(data_containers_or_path=None, meth=None, unmeth=None, poobah=None,
noob=True, silent=False, verbose=False, plot=True, compare=False, return_fig=False, palette=None,
cutoff_line=True):
try:
if Path(data_containers_or_path).exists():
path = Path(data_containers_or_path)
else:
path = None
except TypeError:
path = None
if isinstance(data_containers_or_path, Path):
path = data_containers_or_path
data_containers = None
elif isinstance(path, Path):
data_containers = None
else:
path = None
data_containers = data_containers_or_path
if isinstance(data_containers_or_path, pd.DataFrame):
raise ValueError("M_vs_U cannot plot a dataframe of processed data; requires meth and unmeth values.")
if not isinstance(path, Path) and isinstance(data_containers, type(None)) and not isinstance(meth, pd.DataFrame) and not isinstance(unmeth, pd.DataFrame):
print("You must specify a path to methylprep processed data files, or provide a data_containers object as input, or pass in meth and unmeth dataframes.")
return
elif isinstance(meth,type(None)) and isinstance(unmeth,type(None)):
try:
if compare:
meth, unmeth, _meth, _unmeth = _get_data(data_containers, path, compare=compare, noob=noob)
else:
meth, unmeth = _get_data(data_containers, path, compare=compare, noob=noob)
except Exception as e:
print(e)
print("No processed data found.")
return
if isinstance(poobah,bool) and poobah == False:
poobah_df = None
elif isinstance(poobah, pd.DataFrame):
poobah_df = poobah
poobah = True
else:
poobah_df = None
if isinstance(path, Path) and 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:
poobah_df = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])
poobah=True
else:
if poobah_df is None:
LOGGER.warning("Did not find a poobah_values.pkl file; unable to color-code plot.")
poobah = False #user may have set this to True or None, but changing params to fit data.
if verbose and not silent and isinstance(poobah_df,pd.DataFrame):
LOGGER.info("Using poobah_values.pkl")
#palette options to pass in: "CMRmap" "flare" "twilight" "Blues", "tab10"
hues_palette = sb.color_palette("twilight", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)
if poobah is not False and isinstance(poobah_df, pd.DataFrame) and not compare:
if poobah_df.isna().sum().sum() > 0:
if poobah_df.isna().equals(meth.isna()) and poobah_df.isna().equals(unmeth.isna()):
pass # not a problem if the SAME probes are excluded in all dataframes
else:
LOGGER.warning("Your poobah_values.pkl file contains missing values; color coding will be inaccurate.")
percent_failures = round(100*( poobah_df[poobah_df > 0.05].count() / poobah_df.count() ),1)
percent_failures = percent_failures.rename('probe_failure (%)')
meth_med = meth.median()
unmeth_med = unmeth.median()
# Series.where will replace the stuff that is False, so you have to negate it.
percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)
percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)
percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)
percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)
percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)
percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)
percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)
percent_failures_hues = percent_failures_hues.astype(int)
#sizes = percent_failures_hues.copy()
percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})
legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']
df = pd.concat([
meth_med.rename('meth'),
unmeth_med.rename('unmeth'),
percent_failures_hues],
#sizes.rename('size')],
axis=1)
if plot:
# plot it
fig,ax = plt.subplots(figsize=(10,10))
plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')
if poobah and not compare:
this = sb.scatterplot(data=df, x="meth", y="unmeth", hue="probe_failure (%)",
palette=hues_palette, hue_order=legend_order, legend="full") # size="size"
legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
legend.set_title("Probe failure rate (%)", prop={'size':'large'})
elif not poobah and not compare:
this = sb.scatterplot(x=meth.median(),y=unmeth.median(),s=75)
elif compare:
data_df = pd.DataFrame(data={
'meth': meth.median(),
'unmeth': unmeth.median()
})
data_df["hue"] = "Raw intensity"
data_df2 = pd.DataFrame(data={ # the NOOB version
'meth': _meth.median(),
'unmeth': _unmeth.median()
})
# each data set should have same samples in same order, so label_lookup will work for both hues
label_lookup = {index_val: chr(i+65) if i <= 26 else str(i-26) for i,index_val in enumerate(data_df.index)}
data_df2['hue'] = "Corrected intensity"
data_df = data_df.append(data_df2)
del data_df2
legend_order = ["Raw intensity", "Corrected intensity"]
hues_palette = sb.color_palette("tab10", n_colors=2) if palette is None else sb.color_palette(palette, n_colors=2)
this = sb.scatterplot(data=data_df, x='meth', y='unmeth', hue='hue', palette=hues_palette)
# FINALLY, label ALL points so you can compare the shifts
for index_val, row in data_df.iterrows():
color_code = {"Raw intensity":"blue", "Corrected intensity": "darkorange"}
#proxy_label = chr(i+65) if i <= 52 else str(i-65)
proxy_label = label_lookup.get(index_val,"-1")
plt.text(x=row["meth"]+7, y=row["unmeth"]+7, s=proxy_label,
fontdict={'color':color_code.get(row["hue"], "black"), 'size':8, 'family':'sans-serif'})
#bbox=dict(facecolor=’yellow’,alpha=0.5))
if poobah and not compare:
plt.title('M versus U plot: Colors are the percent of probe failures per sample')
elif compare:
plt.title('M versus U plot: Showing effect of processing fluorescence intensities')
else:
plt.title('M versus U plot')
plt.xlabel('Median Methylated Intensity', fontsize='large')
plt.ylabel('Median Unmethylated Intensity', fontsize='large')
# add diagonal line
if cutoff_line:
line = {'y': this.axes.get_ylim(), 'x': this.axes.get_xlim()}
sx = []
sy = []
for i in range(1000):
sx.append(line['x'][0] + i/1000*(line['x'][1] - line['x'][0]))
sy.append(line['y'][0] + i/1000*(line['y'][1] - line['y'][0]))
this = sb.scatterplot(x=sx, y=sy, s=3, color=(0.8, 0.8, 0.8))
if poobah:
# This is necessary because legend title disappears when adding cutoff-line for some reason.
legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')
legend.set_title("Probe failure rate (%)", prop={'size':'large'})
if return_fig:
return this.get_figure()
plt.show()
plt.close('all')
else:
return {'meth_median': meth.median(), 'unmeth_median': unmeth.median()}
def plot_beta_by_type(beta_df, probe_type='all', return_fig=False, silent=False, on_lambda=False):
    """Plot beta-value density curves split by Infinium probe design type.

    Uses the matching methylprep manifest to partition the probes in ``beta_df``
    into design types (I, II, and the Red/Green sub-channels of type I), then
    draws one ``methylcheck.beta_density_plot`` per requested type. Mouse
    arrays additionally support splitting by the 2-character probe-name prefix
    ('cg', 'ch', 'uk').

    Parameters
    ----------
    beta_df : pandas.DataFrame
        Beta values; probes in rows (transposed automatically if samples
        appear to be in rows instead).
    probe_type : str
        One of 'I', 'II', 'IR', 'IG', 'all', or (mouse arrays only) one of
        'cg', 'ch', 'uk'.
    return_fig : bool
        If True, return a list of matplotlib figures instead of displaying.
    silent : bool
        Passed through to ``beta_density_plot``; also suppresses the
        "no probes found" warning for empty mouse subsets.
    on_lambda : bool
        Use AWS-lambda manifest path conventions in methylprep.

    Returns
    -------
    list of matplotlib figures when ``return_fig`` is True, else None.
    """
    mouse_probe_types = ['cg','ch','uk']
    probe_types = ['I', 'II', 'IR', 'IG', 'all'] # 'SnpI', 'Control' are in manifest, but not in the processed data
    if probe_type not in probe_types + mouse_probe_types:
        raise ValueError(f"Please specify an Infinium probe_type: ({probe_types}) to plot or, if mouse array, one of these ({mouse_probe_types}) or 'all'.")
    # orient so probes are in rows
    if beta_df.shape[1] > beta_df.shape[0]:
        beta_df = beta_df.transpose()
    array_type, man_filepath = methylcheck.detect_array(beta_df, returns='filepath', on_lambda=on_lambda)
    # note that 'array_type' can look like string 'mouse' but only str(array_type) will match the string 'mouse'
    if Path.exists(man_filepath):
        try:
            from methylprep import Manifest, ArrayType
        except ImportError:
            raise ImportError("plot_betas_by_type() requires methylprep")
        # temporarily silence manifest-loading chatter
        LOGGER.setLevel(logging.WARNING)
        manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)
        LOGGER.setLevel(logging.INFO)
    else:
        raise FileNotFoundError("manifest file not found.")
    # attach probe_type / Color_Channel reference columns for filtering (II, I, IR, IG, Control)
    mapper = manifest.data_frame.loc[:, ['probe_type','Color_Channel']]
    beta_df = beta_df.merge(mapper, right_index=True, left_index=True)
    figs = []

    def _plot_infinium(subset, label):
        # Drop the reference columns, then either collect or display the density plot.
        subset = subset.drop(columns=['probe_type', 'Color_Channel'])
        title = f'{subset.shape[0]} {label} probes'
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=title, return_fig=True, silent=silent, full_range=True) )
        else:
            print(f'Found {subset.shape[0]} {label} probes.')
            methylcheck.beta_density_plot(subset, plot_title=title, silent=silent, full_range=True)

    if probe_type in ('I', 'all'):
        _plot_infinium(beta_df[beta_df['probe_type'] == 'I'], 'type I')
    if probe_type in ('II', 'all'):
        _plot_infinium(beta_df[beta_df['probe_type'] == 'II'], 'type II')
    if probe_type in ('IR', 'all'):
        _plot_infinium(beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Red')], 'type I Red (IR)')
    if probe_type in ('IG', 'all'):
        _plot_infinium(beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Grn')], 'type I Green (IG)')
    if str(array_type) != 'mouse':
        if return_fig:
            return figs
        return
    ############ MOUSE ONLY ################
    # TODO: control probe types #
    # 'probe_type' are I, II, IR, IG and mouse_probe_type (mouse only) are 'cg','ch','uk'. | 'rs' are in controls
    mapper = pd.DataFrame(data=manifest.data_frame.index.str[:2], index=manifest.data_frame.index)
    mapper = mapper.rename(columns={'IlmnID':'mouse_probe_type'})
    beta_df = beta_df.merge(mapper, right_index=True, left_index=True)

    def _plot_mouse(subset, label):
        # Mouse subsets have an extra reference column and no "Found N" print.
        subset = subset.drop(columns=['probe_type','Color_Channel','mouse_probe_type'])
        title = f'{subset.shape[0]} {label} probes'
        if return_fig:
            figs.append( methylcheck.beta_density_plot(subset, plot_title=title, return_fig=True, silent=silent, full_range=True) )
        else:
            methylcheck.beta_density_plot(subset, plot_title=title, silent=silent, full_range=True)

    if probe_type in mouse_probe_types:
        _plot_mouse(beta_df[beta_df['mouse_probe_type'] == probe_type], probe_type)
    if probe_type == 'all':
        for mouse_probe_type in mouse_probe_types:
            subset = beta_df[beta_df['mouse_probe_type'] == mouse_probe_type]
            if subset.shape[0] == 0 and not silent:
                # BUGFIX: this warning was a plain string literal missing its f-prefix,
                # so it printed the placeholder text verbatim.
                LOGGER.warning(f"No {mouse_probe_type} probes found")
            _plot_mouse(subset, mouse_probe_type)
    if return_fig:
        return figs
    plt.show()
    plt.close('all')
def plot_controls(path=None, subset='all', return_fig=False):
    """Plot Illumina internal control probe intensities, green vs red channel.

    ``path`` may be (a) a folder containing ``control_probes.pkl`` (saved with
    methylprep's ``--save_control`` option), (b) a path to such a pickle file,
    or (c) an already-loaded dict of {sample: control-probe DataFrame}.

    ``subset`` selects one control family ('staining', 'negative',
    'hybridization', 'extension', 'bisulfite', 'non-polymorphic',
    'target-removal', 'specificity') or 'all'. With ``return_fig=True`` the
    matplotlib figures are returned instead of shown.
    """
    subset_options = {'staining', 'negative', 'hybridization', 'extension', 'bisulfite', 'non-polymorphic', 'target-removal', 'specificity', 'all'}
    if subset not in subset_options:
        raise ValueError(f"Choose one of these options for plot type: {subset_options}")
    if not path:
        print("You must specify a path to the control probes processed data file or folder (available with the `--save_control` methylprep process option).")
        return
    try:
        # detect a dict of dataframes (control_probes.pkl) object
        if type(path) is dict and all([type(df) is type(pd.DataFrame()) for df in path.values()]):
            control = path
            path = None
        else:
            path = Path(path)
            if path.is_dir():
                control = pd.read_pickle(Path(path, 'control_probes.pkl'))
            elif path.is_file():
                control = pd.read_pickle(path) # allows for any arbitrary filename to be used, so long as structure is same, and it is a pickle.
    except Exception as e: # cannot unpack NoneType
        print(e)
        print("No data.")
        return
    # NOTE(review): mouse arrays are detected purely by control-probe row count
    # (473 vs 694 for EPIC) -- confirm this holds for any new array types.
    mouse = True if list(control.values())[0].shape[0] == 473 else False # vs 694 controls for epic.
    # hide the x tick labels when there are too many samples to read them
    plotx = 'show' if len(list(control.keys())) <= 30 else None
    # Create empty dataframes for red and green negative controls
    control_R = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])
    control_G = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])
    # convert the list of DFs into one DF for each red and green channel,
    # one column per sample, joined on the probe's Extended_Type name
    for sample,c in control.items():
        # drop SNPS from control DF using Control_Type column.
        c = c[c['Control_Type'].notna() == True]
        df_red = c[['Extended_Type','Mean_Value_Red']].rename(columns={'Mean_Value_Red':sample})
        df_green = c[['Extended_Type','Mean_Value_Green']].rename(columns={'Mean_Value_Green':sample})
        control_R = pd.merge(left=control_R,right=df_red,on=['Extended_Type'])
        control_G = pd.merge(left=control_G,right=df_green,on=['Extended_Type'])
    figs = []
    # Each section below follows the same recipe: filter one Control_Type,
    # build a probe->color map ('-99' means "no color in manifest"), transpose
    # so samples are rows, then hand both channels to _qc_plotter.
    if subset in ('staining','all'):
        stain_red = control_R[control_R['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        stain_green = control_G[control_G['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(stain_green.Extended_Type, stain_green.Color))
        color_dict.update({k: (v if v != '-99' else 'gold') for k,v in color_dict.items()})
        stain_green = stain_green.drop(columns=['Color']).set_index('Extended_Type')
        stain_red = stain_red.drop(columns=['Color']).set_index('Extended_Type')
        stain_red = stain_red.T
        stain_green = stain_green.T
        if stain_red.shape[1] == 0 or stain_green.shape[1] == 0:
            LOGGER.info("No staining probes found")
        else:
            fig = _qc_plotter(stain_red, stain_green, color_dict, xticks=plotx, ymax=60000, title='Staining', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('negative','all'):
        if mouse:
            # mouse manifest defines control probes in TWO columns, just to be annoying.
            neg_red = control_R[(control_R['Control_Type'] == 'NEGATIVE') & (control_R['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_green = control_G[(control_G['Control_Type'] == 'NEGATIVE') & (control_G['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_mouse_probe_names = list(neg_red.Extended_Type.values)
        else:
            neg_red = control_R[control_R['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
            neg_green = control_G[control_G['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(neg_green.Extended_Type, neg_green.Color))
        color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})
        neg_green = neg_green.drop(columns=['Color']).set_index('Extended_Type')
        neg_red = neg_red.drop(columns=['Color']).set_index('Extended_Type')
        neg_red = neg_red.T
        neg_green = neg_green.T
        # note: GenomeStudio appears to only do the first 16 negative control probes
        # Maybe user should be able to select which they want to see
        # There is a total of 600, which is too many to plot at once
        list_of_negative_controls_to_plot = ['Negative 1','Negative 2','Negative 3','Negative 4','Negative 5',
                                             'Negative 6','Negative 7','Negative 8','Negative 9','Negative 10',
                                             'Negative 11','Negative 12','Negative 13','Negative 14','Negative 15',
                                             'Negative 16']
        # UPDATE: picking a smattering of probes that are in both EPIC and EPIC+
        list_of_negative_controls_to_plot = ['Negative 1','Negative 142','Negative 3','Negative 4','Negative 5',
                                             'Negative 6','Negative 7','Negative 8','Negative 119','Negative 10',
                                             'Negative 484','Negative 12','Negative 13','Negative 144','Negative 151',
                                             'Negative 166']
        probes_to_plot = list_of_negative_controls_to_plot
        if mouse:
            probes_to_plot = neg_mouse_probe_names[:36] # plot the first 36
        # scale the y-axis to the largest intensity actually present, plus 10%
        dynamic_controls = [c for c in probes_to_plot if c in neg_red.columns and c in neg_green.columns]
        dynamic_ymax = max([max(neg_red[dynamic_controls].max(axis=0)), max(neg_green[dynamic_controls].max(axis=0))])
        dynamic_ymax = dynamic_ymax + int(0.1*dynamic_ymax)
        fig = _qc_plotter(neg_red, neg_green, color_dict, columns=probes_to_plot, ymax=dynamic_ymax, xticks=plotx, title='Negative', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('hybridization','all'):
        hyb_red = control_R[control_R['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        hyb_green = control_G[control_G['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(hyb_green.Extended_Type, hyb_green.Color))
        hyb_green = hyb_green.drop(columns=['Color']).set_index('Extended_Type')
        hyb_red = hyb_red.drop(columns=['Color']).set_index('Extended_Type')
        hyb_red = hyb_red.T
        hyb_green = hyb_green.T
        fig = _qc_plotter(hyb_red, hyb_green, color_dict, ymax=35000, xticks=plotx, title='Hybridization', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('extension','all'):
        ext_red = control_R[control_R['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        ext_green = control_G[control_G['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(ext_green.Extended_Type, ext_green.Color))
        ext_green = ext_green.drop(columns=['Color']).set_index('Extended_Type')
        ext_red = ext_red.drop(columns=['Color']).set_index('Extended_Type')
        ext_red = ext_red.T
        ext_green = ext_green.T
        if ext_red.shape[1] == 0 or ext_green.shape[1] == 0:
            LOGGER.info("No extension probes found")
        else:
            fig = _qc_plotter(ext_red, ext_green, color_dict, ymax=50000, xticks=plotx, title='Extension', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('bisulfite','all'):
        bci_red = control_R[control_R['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        bci_green = control_G[control_G['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(bci_green.Extended_Type, bci_green.Color))
        color_dict.update({k: (v if v != 'Both' else 'seagreen') for k,v in color_dict.items()}) # mouse has Both; others don't
        bci_green = bci_green.drop(columns=['Color']).set_index('Extended_Type')
        bci_red = bci_red.drop(columns=['Color']).set_index('Extended_Type')
        bci_red = bci_red.T
        bci_green = bci_green.T
        fig = _qc_plotter(bci_red, bci_green, color_dict, ymax=30000, xticks=plotx, title='Bisulfite Conversion', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if subset in ('non-polymorphic','all'):
        np_red = control_R[control_R['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        np_green = control_G[control_G['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(np_green.Extended_Type, np_green.Color))
        color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})
        np_green = np_green.drop(columns=['Color']).set_index('Extended_Type')
        np_red = np_red.drop(columns=['Color']).set_index('Extended_Type')
        np_red = np_red.T
        np_green = np_green.T
        if np_red.shape[1] == 0 or np_green.shape[1] == 0:
            LOGGER.info("No non-polymorphic probes found")
        else:
            fig = _qc_plotter(np_red, np_green, color_dict, ymax=30000, xticks=plotx, title='Non-polymorphic', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('target-removal','all'):
        tar_red = control_R[control_R['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        tar_green = control_G[control_G['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(tar_green.Extended_Type, tar_green.Color))
        tar_green = tar_green.drop(columns=['Color']).set_index('Extended_Type')
        tar_red = tar_red.drop(columns=['Color']).set_index('Extended_Type')
        tar_red = tar_red.T
        tar_green = tar_green.T
        if tar_red.shape[1] == 0 or tar_green.shape[1] == 0:
            LOGGER.info("No target-removal probes found")
        else:
            fig = _qc_plotter(tar_red, tar_green, color_dict, ymax=2000, xticks=plotx, title='Target Removal', return_fig=return_fig)
            if fig:
                figs.append(fig)
    if subset in ('specificity','all'):
        spec_red = control_R[control_R['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        spec_green = control_G[control_G['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)
        color_dict = dict(zip(spec_green.Extended_Type, spec_green.Color))
        spec_green = spec_green.drop(columns=['Color']).set_index('Extended_Type')
        spec_red = spec_red.drop(columns=['Color']).set_index('Extended_Type')
        spec_red = spec_red.T
        spec_green = spec_green.T
        fig = _qc_plotter(spec_red, spec_green, color_dict, ymax=30000, xticks=plotx, title='Specificity (Type I)', return_fig=return_fig)
        if fig:
            figs.append(fig)
    if return_fig and figs != []:
        return figs
    plt.show()
    plt.close('all')
def _qc_plotter(stain_red, stain_green, color_dict=None, columns=None, ymax=None, xticks='show',
                title='', return_fig=False):
    """Draw paired Green/Red scatter-style plots for one family of control probes.

    Parameters
    ----------
    stain_red, stain_green : pandas.DataFrame
        Samples in rows, control probes (Extended_Type) in columns, for the
        red and green channels respectively.
    color_dict : dict or None
        Maps probe name -> matplotlib color; '-99' entries (probes with no
        color assigned in the manifest) are replaced with 'Black' after a
        warning. Every plotted probe must have an entry.
    columns : list or None
        Optional subset of probe columns to plot; probes missing from either
        channel are dropped with a warning.
    ymax : number or None
        Fixed y-axis upper limit; values above it are clipped out of view
        (a warning is logged per affected probe).
    xticks : str
        'show' keeps sample labels on the x axis; anything else hides them.
    title : str
        Prefix for the two subplot titles.
    return_fig : bool
        Return the figure instead of displaying it.
    """
    fig, (ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(10,8))
    plt.tight_layout(w_pad=15)
    plt.setp(ax1.xaxis.get_majorticklabels(), rotation=90, fontsize='small')
    plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90, fontsize='small')
    ax1.grid(axis='both', linestyle='dotted')
    ax2.grid(axis='both', linestyle='dotted')
    title = title + ' ' if title != '' else title
    ax1.set_title(f'{title}Green')
    ax2.set_title(f'{title}Red')
    if color_dict is None:
        color_dict = {}
    if '-99' in color_dict.values():
        # '-99' marks probes whose manifest entry had no color assigned
        missing_colors = {k:v for k,v in color_dict.items() if v == '-99'}
        LOGGER.warning(f"{title} has invalid colors: {missing_colors}")
        color_dict.update({k:'Black' for k,v in missing_colors.items()})
    if columns is not None:  # idiom fix: was `columns != None` (PEP 8 E711)
        if (set(columns) - set(stain_red.columns) != set() or
            set(columns) - set(stain_green.columns) != set()):
            cols_removed = [c for c in columns if c not in stain_red or c not in stain_green]
            columns = [c for c in columns if c in stain_red and c in stain_green]
            LOGGER.warning(f'These probes were expected but missing from the {title}data: ({", ".join(cols_removed)})')
        stain_red = stain_red.loc[:, columns]
        stain_green = stain_green.loc[:, columns]
    for c in stain_red.columns:
        # warn when the fixed y-limit would clip data out of view
        if ymax is not None and (stain_red[c] > ymax).any():
            LOGGER.warning(f'Some Red {c} values exceed chart maximum and are not shown.')
        if ymax is not None and (stain_green[c] > ymax).any():
            LOGGER.warning(f'Some Green {c} values exceed chart maximum and are not shown.')
        # linewidth=0 + marker='o' yields a scatter-style plot per probe
        ax1.plot(stain_green.index,
                 c,
                 data=stain_green, label=c,
                 color=color_dict[c], linewidth=0, marker='o')
        ax2.plot(stain_red.index,
                 c,
                 data=stain_red, label=c,
                 color=color_dict[c], linewidth=0, marker='o')
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')
    ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')
    if ymax is not None:  # idiom fix: was `ymax != None`
        ax1.set_ylim([0,ymax])
        ax2.set_ylim([0,ymax])
    if xticks != 'show':
        ax1.get_xaxis().set_visible(False)
        ax2.get_xaxis().set_visible(False)
    if return_fig:
        return fig
    plt.show()
    plt.close('all')
def bis_conversion_control(path_or_df, use_median=False, on_lambda=False, verbose=False):
    """Compute a per-sample GCT (bisulfite conversion) score from methylated intensities.

    GCT = 100 * mean(extension-C control probes) / mean(extension-T control
    probes), restricted to out-of-band green probes (Infinium type I probes
    assigned to the Red channel in the manifest).

    Parameters
    ----------
    path_or_df : pandas.DataFrame or path
        Methylated intensities (probes x samples), or a folder containing
        ``meth_values.pkl`` (preferred) or ``noob_meth_values.pkl``.
    use_median : bool
        Currently unused; the mean is always used.
    on_lambda : bool
        Passed through to methylprep manifest loading.
    verbose : bool
        Log each sample's component means and GCT score.

    Returns
    -------
    dict mapping sample name -> GCT score (rounded to 1 decimal); an empty
    dict if the input data could not be read.
    """
    found_meth = False
    try:
        if isinstance(path_or_df, pd.DataFrame):
            meth = path_or_df
            found_meth = True
        else:
            path = Path(path_or_df)
            if path.is_dir() and Path(path, 'meth_values.pkl').is_file():
                meth = pd.read_pickle(Path(path, 'meth_values.pkl'))
                found_meth = True
            if path.is_dir() and Path(path, 'noob_meth_values.pkl').is_file() and not found_meth:
                meth = pd.read_pickle(Path(path, 'noob_meth_values.pkl'))
                found_meth = True
    except Exception as e:
        print(e)
        print("No data.")
        return {}
    if not found_meth:
        raise FileNotFoundError("this requires methylated intensities in a pickle file.")
    array_type, man_filepath = methylcheck.detect_array(meth, returns='filepath', on_lambda=on_lambda)
    try:
        from methylprep import Manifest, ArrayType
    except ImportError:
        raise ImportError("this function requires methylprep")
    # quiet the manifest loader, then restore the default log level
    LOGGER.setLevel(logging.WARNING)
    if Path.exists(man_filepath):
        manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)
    else:
        # no local copy; let methylprep resolve/download the manifest itself
        manifest = Manifest(ArrayType(array_type), filepath_or_buffer=None, on_lambda=on_lambda)
    LOGGER.setLevel(logging.INFO)
    # out-of-band green probes: type I probes measured in the Red channel
    oobG_mask = set(manifest.data_frame[(manifest.data_frame['Infinium_Design_Type'] == 'I') & (manifest.data_frame['Color_Channel'] == 'Red')].index)
    if str(array_type) == 'epic+':
        array_type = 'epic'
        oobG_mask = set([probe.split('_')[0] for probe in oobG_mask]) # these probe names have extra crap on end
        meth = meth.rename(index=lambda x: x.split('_')[0])
    try:
        from importlib import resources # py3.7+
    except ImportError:
        resources = None
    pkg_namespace = 'methylcheck.data_files'
    try:
        if resources is None:
            raise ImportError("importlib.resources unavailable")
        with resources.path(pkg_namespace, f'{array_type}_extC.csv') as probe_filepath:
            ext_C_probes = pd.read_csv(probe_filepath)['x'].values # simple, flat list of probe cgXXX names
        with resources.path(pkg_namespace, f'{array_type}_extT.csv') as probe_filepath:
            ext_T_probes = pd.read_csv(probe_filepath)['x'].values
    except Exception:
        # BUGFIX: this was a bare `except:` whose handler used pkg_resources
        # without guaranteeing it was imported -- when importlib.resources
        # existed but the lookup failed, the fallback raised NameError.
        import pkg_resources
        probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extC.csv')
        ext_C_probes = pd.read_csv(probe_filepath)['x'].values
        probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extT.csv')
        ext_T_probes = pd.read_csv(probe_filepath)['x'].values
    # restrict the extension control probe lists to out-of-band green probes
    ext_C = set(ext_C_probes).intersection(oobG_mask)
    ext_T = set(ext_T_probes).intersection(oobG_mask)
    # GCT: mean(C) / mean(T); pandas mean() excludes NaNs by default
    # TEST bis_conversion_control('/Volumes/LEGX/GSE69852/idats_2021_04_12')
    table = {} # keys are sentrix_ids; values are GCT scores
    for sample in meth.columns:
        C_mask = meth[sample].index.isin(ext_C)
        C_mean = meth[sample].loc[C_mask].mean()
        T_mask = meth[sample].index.isin(ext_T)
        T_mean = meth[sample].loc[T_mask].mean()
        if verbose:
            LOGGER.info(f"{sample}: ({int(round(C_mean))} / {int(round(T_mean))}) = GCT {round(100*C_mean/T_mean, 1)}")
        table[sample] = round(100*C_mean/T_mean, 1)
    return table
| true | true |
1c4722894dd64b399061cba6ce7c56519333d6f2 | 3,378 | py | Python | simscale_sdk/models/symmetry_vbc.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | 8 | 2021-01-22T13:41:03.000Z | 2022-01-03T09:00:10.000Z | simscale_sdk/models/symmetry_vbc.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | null | null | null | simscale_sdk/models/symmetry_vbc.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | 3 | 2021-03-18T15:52:52.000Z | 2022-01-03T08:59:30.000Z | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class SymmetryVBC(object):
    """Auto-generated OpenAPI model (openapi-generator.tech); do not edit manually.

    A symmetry velocity boundary condition whose only property is the schema
    discriminator string ``type`` (default ``'SYMMETRY'``).
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {'type': 'str'}
    # attribute name -> key used in the JSON definition
    attribute_map = {'type': 'type'}

    def __init__(self, type='SYMMETRY', local_vars_configuration=None):  # noqa: E501
        """SymmetryVBC - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._type = None
        self.discriminator = None
        self.type = type  # goes through the validating setter below

    @property
    def type(self):
        """Get the ``type`` discriminator of this SymmetryVBC.

        :return: the schema name string
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the ``type`` discriminator, rejecting None under client-side validation.

        :param type: the schema name string
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        def _plain(value):
            # Convert one attribute value to plain python data.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when ``other`` is a SymmetryVBC with equal properties."""
        return isinstance(other, SymmetryVBC) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True when the objects differ in type or in properties."""
        return not self.__eq__(other)
import pprint
import re
import six
from simscale_sdk.configuration import Configuration
class SymmetryVBC(object):
    """Auto-generated OpenAPI model for a symmetry velocity boundary condition.

    The only property is the schema discriminator string ``type``
    (default 'SYMMETRY'). ``openapi_types`` maps attribute name ->
    attribute type; ``attribute_map`` maps attribute name -> JSON key.
    """
    openapi_types = {
        'type': 'str'
    }
    attribute_map = {
        'type': 'type'
    }
    def __init__(self, type='SYMMETRY', local_vars_configuration=None):
        """SymmetryVBC - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._type = None
        self.discriminator = None
        # assignment runs through the validating `type` setter below
        self.type = type
    @property
    def type(self):
        """Get the ``type`` discriminator (schema name) of this SymmetryVBC."""
        return self._type
    @type.setter
    def type(self, type):
        """Set the ``type`` discriminator; None is rejected when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")
        self._type = type
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are SymmetryVBC with equal properties."""
        if not isinstance(other, SymmetryVBC):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Return True if the objects differ in type or in properties."""
        if not isinstance(other, SymmetryVBC):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
1c47230e511165113e34bda546e7409a06011882 | 382 | py | Python | mtp_api/apps/disbursement/migrations/0012_index_prisoner_number.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 5 | 2016-01-05T12:21:35.000Z | 2020-10-28T17:06:02.000Z | mtp_api/apps/disbursement/migrations/0012_index_prisoner_number.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 209 | 2015-06-12T09:39:41.000Z | 2022-03-21T16:01:19.000Z | mtp_api/apps/disbursement/migrations/0012_index_prisoner_number.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 1 | 2021-04-11T06:19:23.000Z | 2021-04-11T06:19:23.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('disbursement', '0011_disbursement_natural_ordering'),
]
operations = [
migrations.AlterField(
model_name='disbursement',
name='prisoner_number',
field=models.CharField(db_index=True, max_length=250),
),
]
| 25.466667 | 66 | 0.63089 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a database index to ``Disbursement.prisoner_number``.

    The field definition is otherwise unchanged (CharField, max_length=250);
    ``db_index=True`` is the only alteration.
    """

    dependencies = [
        ('disbursement', '0011_disbursement_natural_ordering'),
    ]
    operations = [
        migrations.AlterField(
            model_name='disbursement',
            name='prisoner_number',
            field=models.CharField(db_index=True, max_length=250),
        ),
    ]
| true | true |
1c4723608719c6c2a1db977050fea496f0778717 | 348 | py | Python | xradar/__init__.py | mgrover1/pyart-xarray-sandbox | 89e5cc8079a0f7d1ed62be882710e907009d2523 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:54:21.000Z | 2022-03-30T07:54:21.000Z | xradar/__init__.py | mgrover1/pyart-xarray-sandbox | 89e5cc8079a0f7d1ed62be882710e907009d2523 | [
"Apache-2.0"
] | null | null | null | xradar/__init__.py | mgrover1/pyart-xarray-sandbox | 89e5cc8079a0f7d1ed62be882710e907009d2523 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# flake8: noqa
""" Top-level module. """
from pkg_resources import DistributionNotFound, get_distribution
from .main import create_dataset_from_sweep, convert_to_xradar
try:
    # Resolve the installed package's version from its distribution metadata.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:  # pragma: no cover
    # Package metadata is unavailable (e.g. running from a source checkout).
    __version__ = 'unknown'  # pragma: no cover
from pkg_resources import DistributionNotFound, get_distribution
from .main import create_dataset_from_sweep, convert_to_xradar
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
__version__ = 'unknown' | true | true |
1c4723879bb3f58b9c0ed9d4c2431b19c7c7ae96 | 5,977 | py | Python | src/main/python/smv/smvschema.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | null | null | null | src/main/python/smv/smvschema.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | 34 | 2022-02-26T04:27:34.000Z | 2022-03-29T23:05:47.000Z | src/main/python/smv/smvschema.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | null | null | null | #
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import pyspark.sql.types as T
from smv.error import SmvRuntimeError
from smv.utils import is_string
from smv.csv_attributes import CsvAttributes
# make it as a class with spark-schema, attrs (consider date, time formats as attr)
class SmvSchema(object):
    """In-memory representation of an SMV schema: a Spark ``StructType`` plus
    a dict of CSV attributes (e.g. header flag, date/timestamp formats).

    Accepts either a ready-made ``pyspark.sql.types.StructType`` or an SMV
    schema string of ``;``-separated entries such as::

        @has-header = true;
        id: String;
        amt: Decimal[10,2];
        dt: Date[yyyy-MM-dd]
    """
    def __init__(self, _schema):
        # _schema may be an SMV schema string or an already-built StructType.
        if is_string(_schema):
            (s, a) = self._fullStrToSchema(_schema)
        elif isinstance(_schema, T.StructType):
            (s, a) = (
                _schema,
                CsvAttributes(),  # Default csv attributes
            )
        else:
            raise SmvRuntimeError("Unsupported schema type: {}".format(type(_schema)))
        self.schema = s
        self.attributes = a
    def updateAttrs(self, attrs):
        """Merge ``attrs`` into the schema attributes; returns self for chaining."""
        self.attributes.update(attrs)
        return self
    def _strToStructField(self, fieldStr):
        """Parse one ``name: type [@metadata={...}]`` entry.

        Returns a tuple ``(StructField, dateFormat, timestampFormat)`` where
        the two format strings are None unless the entry declared one.
        """
        # *? is for non-greedy match
        pattern = re.compile(r"""\s*(?P<name>[^:]*?)\s*: # Col Name part
                           \s*(?P<dtype>[^@]*?)\s* # Type part
                           (@metadata=(?P<meta>.*))? # Meta if any
                           \Z #end of string""", re.VERBOSE)
        match = pattern.match(fieldStr)
        name = match.group('name')
        dtype = match.group('dtype')
        meta = match.group('meta') or "{}"
        # Decimal, date and timestamp need special handling; everything else is
        # passed through lower-cased as a plain Spark type name.
        dfmtStr = None
        tfmtStr = None
        if (re.match(r"[Dd]ecimal", dtype)):
            # Optional [precision[,scale]] suffix; defaults are 10 and 0.
            dpat = re.compile(r"""[Dd]ecimal(\[ *(?P<precision>\d+) *(, *(?P<scale>\d+) *)?\])?""")
            dmatch = dpat.match(dtype)
            precision = dmatch.group('precision') or 10
            scale = dmatch.group('scale') or 0
            dtypeStr = "decimal({},{})".format(precision, scale)
        elif (re.match(r"[Dd]ate", dtype)):
            dmatch = re.match(r"[Dd]ate(\[(?P<fmt>.+)\])?", dtype)
            dfmtStr = dmatch.group('fmt')
            dtypeStr = "date"
        elif (re.match(r"[Tt]imestamp", dtype)):
            dmatch = re.match(r"[Tt]imestamp(\[(?P<fmt>.+)\])?", dtype)
            tfmtStr = dmatch.group('fmt')
            dtypeStr = "timestamp"
        elif (re.match(r"[Ss]tring", dtype)):
            # smv allows String[,_SmvStrNull_] style values; the null marker
            # is ignored here.
            dtypeStr = "string"
        else:
            dtypeStr = dtype.lower()
        fieldJson = {
            "name": name,
            "type": dtypeStr,
            "nullable": True,
            "metadata": json.loads(meta)
        }
        field = T.StructField.fromJson(fieldJson)
        return (field, dfmtStr, tfmtStr)
    def _strToAttr(self, attrStr):
        """Parse one ``@name = value`` attribute line into a (name, value) pair."""
        pattern = re.compile(r"@\s*(?P<name>\S*)\s*=\s*(?P<value>\S*)\s*")
        match = pattern.match(attrStr)
        name = match.group('name')
        value = match.group('value')
        return (name, value)
    def _strListToSchema(self, smvStrs):
        """Build ``(StructType, attrs-dict)`` from a list of schema entries.

        Blank lines and lines beginning with ``//`` or ``#`` are dropped,
        trailing ``;`` is stripped, ``@...`` lines become attributes and the
        rest become fields. Raises SmvRuntimeError if the fields declare more
        than one distinct date format or timestamp format.
        """
        # NOTE(review): the comment check runs on the raw entry, so `//`/`#`
        # must be at the very start; indented comments would fall through to
        # field parsing — confirm intended.
        no_comm = [re.sub(';[ \t]*$', '', r).strip() for r in smvStrs if not (re.match(r"^(//|#).*$", r) or re.match(r"^[ \t]*$", r))]
        attrStrs = [s for s in no_comm if s.startswith("@")]
        fieldStrs = [s for s in no_comm if not s.startswith("@")]
        attrs = dict([self._strToAttr(a) for a in attrStrs])
        fieldlist = []
        dfmtlist = []
        tfmtlist = []
        for s in fieldStrs:
            (field, dfmt, tfmt) = self._strToStructField(s)
            fieldlist.append(field)
            if dfmt:
                dfmtlist.append(dfmt)
            if tfmt:
                tfmtlist.append(tfmt)
        # A single file-wide format is allowed for each of date and timestamp.
        if len(set(dfmtlist)) > 1:
            raise SmvRuntimeError("Date type has multiple formats: {}".format(set(dfmtlist)))
        elif len(set(dfmtlist)) == 1:
            dateFormat = dfmtlist[0]
        else:
            dateFormat = None
        if len(set(tfmtlist)) > 1:
            raise SmvRuntimeError("TimeStamp type has multiple formats: {}".format(set(tfmtlist)))
        elif len(set(tfmtlist)) == 1:
            timestampFormat = tfmtlist[0]
        else:
            timestampFormat = None
        if dateFormat:
            attrs.update({"dateFormat": dateFormat})
        if timestampFormat:
            attrs.update({"timestampFormat": timestampFormat})
        schema = T.StructType(fieldlist)
        return (schema, attrs)
    def _fullStrToSchema(self, smvStr):
        """Split a full schema string on ``;`` and delegate to _strListToSchema."""
        (s, a) = self._strListToSchema(smvStr.split(";"))
        return (s, a)
    def toStrForFile(self):
        """Render the schema back to SMV schema-file text (attributes first).

        NOTE(review): ``dataType.typeName()`` presumably drops decimal
        precision/scale, and date/timestamp formats live only in the
        attributes, so a round trip through this text may not be lossless —
        confirm.
        """
        attrStr = "\n".join(["@{} = {}".format(k, v) for (k, v) in self.attributes.items()])
        s = self.schema
        fmtStr = "\n".join([
            "{}: {} @metadata={}".format(name, s[name].dataType.typeName(), json.dumps(s[name].metadata))
            for name in s.fieldNames()
        ])
        return attrStr + "\n\n" + fmtStr
    def addCsvAttributes(self, attr):
        """Merge CSV attributes into this schema; returns self for chaining."""
        self.attributes.update(attr)
        return self
    @classmethod
    def dicoverFromInferedDF(cls, df):
        """Derive an SmvSchema from a DataFrame with inferred types.

        Column names are normalized (runs of non-word chars -> "_"), each
        column's first-row value is stored as ``smvDesc`` metadata, and a
        ``has-header`` attribute is set. (The method-name typos are kept for
        backward compatibility.) Raises IndexError if ``df`` is empty, since
        the first row is sampled.
        """
        raw_schema = df.schema
        first_row = df.limit(1).collect()[0]
        new_schema = T.StructType([])
        for n in raw_schema.fieldNames():
            name_norm = re.sub(r"\W+", "_", n.strip())
            dtype = raw_schema[n].dataType
            meta = {"smvDesc": str(first_row[n])}
            new_schema.add(name_norm, dtype, True, meta)
        return cls(new_schema).addCsvAttributes({"has-header": "true"})
| 36.224242 | 134 | 0.552953 |
import re
import json
import pyspark.sql.types as T
from smv.error import SmvRuntimeError
from smv.utils import is_string
from smv.csv_attributes import CsvAttributes
class SmvSchema(object):
def __init__(self, _schema):
if is_string(_schema):
(s, a) = self._fullStrToSchema(_schema)
elif isinstance(_schema, T.StructType):
(s, a) = (
_schema,
CsvAttributes(),
)
else:
raise SmvRuntimeError("Unsupported schema type: {}".format(type(_schema)))
self.schema = s
self.attributes = a
def updateAttrs(self, attrs):
self.attributes.update(attrs)
return self
def _strToStructField(self, fieldStr):
pattern = re.compile(r"""\s*(?P<name>[^:]*?)\s*: # Col Name part
\s*(?P<dtype>[^@]*?)\s* # Type part
(@metadata=(?P<meta>.*))? # Meta if any
\Z #end of string""", re.VERBOSE)
match = pattern.match(fieldStr)
name = match.group('name')
dtype = match.group('dtype')
meta = match.group('meta') or "{}"
dfmtStr = None
tfmtStr = None
if (re.match(r"[Dd]ecimal", dtype)):
dpat = re.compile(r"""[Dd]ecimal(\[ *(?P<precision>\d+) *(, *(?P<scale>\d+) *)?\])?""")
dmatch = dpat.match(dtype)
precision = dmatch.group('precision') or 10
scale = dmatch.group('scale') or 0
dtypeStr = "decimal({},{})".format(precision, scale)
elif (re.match(r"[Dd]ate", dtype)):
dmatch = re.match(r"[Dd]ate(\[(?P<fmt>.+)\])?", dtype)
dfmtStr = dmatch.group('fmt')
dtypeStr = "date"
elif (re.match(r"[Tt]imestamp", dtype)):
dmatch = re.match(r"[Tt]imestamp(\[(?P<fmt>.+)\])?", dtype)
tfmtStr = dmatch.group('fmt')
dtypeStr = "timestamp"
elif (re.match(r"[Ss]tring", dtype)):
dtypeStr = "string"
else:
dtypeStr = dtype.lower()
fieldJson = {
"name": name,
"type": dtypeStr,
"nullable": True,
"metadata": json.loads(meta)
}
field = T.StructField.fromJson(fieldJson)
return (field, dfmtStr, tfmtStr)
def _strToAttr(self, attrStr):
pattern = re.compile(r"@\s*(?P<name>\S*)\s*=\s*(?P<value>\S*)\s*")
match = pattern.match(attrStr)
name = match.group('name')
value = match.group('value')
return (name, value)
def _strListToSchema(self, smvStrs):
no_comm = [re.sub(';[ \t]*$', '', r).strip() for r in smvStrs if not (re.match(r"^(//|#).*$", r) or re.match(r"^[ \t]*$", r))]
attrStrs = [s for s in no_comm if s.startswith("@")]
fieldStrs = [s for s in no_comm if not s.startswith("@")]
attrs = dict([self._strToAttr(a) for a in attrStrs])
fieldlist = []
dfmtlist = []
tfmtlist = []
for s in fieldStrs:
(field, dfmt, tfmt) = self._strToStructField(s)
fieldlist.append(field)
if dfmt:
dfmtlist.append(dfmt)
if tfmt:
tfmtlist.append(tfmt)
if len(set(dfmtlist)) > 1:
raise SmvRuntimeError("Date type has multiple formats: {}".format(set(dfmtlist)))
elif len(set(dfmtlist)) == 1:
dateFormat = dfmtlist[0]
else:
dateFormat = None
if len(set(tfmtlist)) > 1:
raise SmvRuntimeError("TimeStamp type has multiple formats: {}".format(set(tfmtlist)))
elif len(set(tfmtlist)) == 1:
timestampFormat = tfmtlist[0]
else:
timestampFormat = None
if dateFormat:
attrs.update({"dateFormat": dateFormat})
if timestampFormat:
attrs.update({"timestampFormat": timestampFormat})
schema = T.StructType(fieldlist)
return (schema, attrs)
def _fullStrToSchema(self, smvStr):
(s, a) = self._strListToSchema(smvStr.split(";"))
return (s, a)
def toStrForFile(self):
attrStr = "\n".join(["@{} = {}".format(k, v) for (k, v) in self.attributes.items()])
s = self.schema
fmtStr = "\n".join([
"{}: {} @metadata={}".format(name, s[name].dataType.typeName(), json.dumps(s[name].metadata))
for name in s.fieldNames()
])
return attrStr + "\n\n" + fmtStr
def addCsvAttributes(self, attr):
self.attributes.update(attr)
return self
@classmethod
def dicoverFromInferedDF(cls, df):
raw_schema = df.schema
first_row = df.limit(1).collect()[0]
new_schema = T.StructType([])
for n in raw_schema.fieldNames():
name_norm = re.sub(r"\W+", "_", n.strip())
dtype = raw_schema[n].dataType
meta = {"smvDesc": str(first_row[n])}
new_schema.add(name_norm, dtype, True, meta)
return cls(new_schema).addCsvAttributes({"has-header": "true"})
| true | true |
1c47246c602d65778a1c94df5d2b5e2fea0f4544 | 2,314 | py | Python | tests/test_return_values.py | Plan9-Archive/limbo-qt-bridge | 8c1cc4ee3a4d10c3a129a9ea103ef318c533e4fe | [
"MIT"
] | null | null | null | tests/test_return_values.py | Plan9-Archive/limbo-qt-bridge | 8c1cc4ee3a4d10c3a129a9ea103ef318c533e4fe | [
"MIT"
] | null | null | null | tests/test_return_values.py | Plan9-Archive/limbo-qt-bridge | 8c1cc4ee3a4d10c3a129a9ea103ef318c533e4fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys, time
def send(message):
    # Emit one length-prefixed frame ("<byte-count> <payload>") and flush
    # immediately so the peer process sees it without buffering delays.
    frame = f"{len(message)} {message}"
    sys.stdout.write(frame)
    sys.stdout.flush()
def enc(value, type_):
    """Encode *value* as "<type-tag><length> <text>", the wire format used here."""
    text = str(value)
    return f"{type_}{len(text)} {text}"
def create(id_, name, class_):
    # Ship a newline-terminated "create" command: id, object name, class tag.
    fields = [enc("create", "s"), enc(id_, "i"), enc(name, "s"), enc(class_, "C")]
    send(" ".join(fields) + "\n")
type_to_str = {str: "s", int: "i"}
def call(id_, obj, method, *args):
    """Send a "call" command naming the target object, method and typed args."""
    encoded = []
    for arg in args:
        # Map the Python type to its wire tag first; unsupported argument
        # types raise KeyError before anything is written.
        tag = type_to_str[type(arg)]
        encoded.append(enc(arg, tag))
    send("%s %s %s %s %s\n" % (enc("call", "s"), enc(id_, "i"), enc(obj, "I"),
                               enc(method, "s"), " ".join(encoded)))
def receive(expecting):
    # Block on stdin until a length-prefixed message arrives whose first two
    # decoded values equal `expecting`; return the remaining values.
    # Non-matching messages are silently discarded.
    # NOTE(review): sys.stdin.read(1) returns "" at EOF, so this loops forever
    # if the peer closes the pipe — confirm acceptable for this harness.
    in_message = False
    current = ""
    length = 0
    while True:
        # One byte at a time; framing is "<decimal-length> <payload>".
        current += sys.stdin.read(1)
        if not in_message:
            # Still accumulating the length prefix (up to the first space).
            space = current.find(" ")
            if space == -1:
                continue
            length = int(current[:space])
            current = current[space + 1:]
            in_message = True
        if len(current) >= length:
            content = current[:length]
            args = parse(content)
            if tuple(args[:2]) == expecting:
                return args[2:]
            # Not the reply we wanted: drop it; any trailing bytes begin the
            # next message's length prefix.
            current = current[length:]
            in_message = False
str_to_type = {"i": int, "s": str}
def parse(text):
    """Decode a run of "<type-tag><length> <payload> " tokens into values.

    "i"/"s" payloads become int/str; "N" becomes None; unknown tags are kept
    as raw strings.
    """
    values = []
    pos = 0
    while pos < len(text):
        tag = text[pos]
        sep = text.find(" ", pos)
        size = int(text[pos + 1:sep])
        raw = text[sep + 1:sep + 1 + size]
        if tag in str_to_type:
            raw = str_to_type[tag](raw)
        elif tag == "N":
            raw = None
        values.append(raw)
        # Skip past the payload plus its trailing separator.
        pos = sep + 1 + size + 1
    return values
def call_receive(id_, obj, method, *args):
    # Issue a call and block for the bridge's reply.
    # NOTE(review): replies are matched on the literal ("value", 3) rather
    # than ("value", id_), so this only pairs correctly with calls issued
    # under id 3 — confirm against the bridge protocol.
    call(id_, obj, method, *args)
    return receive(("value", 3))
if __name__ == "__main__":
    # Smoke test for the bridge: create a label, show it, read its size back,
    # grow it, then close it.
    create(0, "label", "QLabel")
    call(1, "label", "setText", 'Hello "World"!')
    call(2, "label", "show")
    # NOTE(review): both size queries reuse id 3 — call_receive only matches
    # replies tagged ("value", 3), so other ids would never match; confirm.
    width = int(call_receive(3, "label", "width")[0])
    height = int(call_receive(3, "label", "height")[0])
    time.sleep(2)
    call(4, "label", "resize", width * 2, height * 4)
    time.sleep(2)
    call(5, "label", "close")
| 24.104167 | 97 | 0.491357 |
import sys, time
def send(message):
sys.stdout.write("%i %s" % (len(message), message))
sys.stdout.flush()
def enc(value, type_):
s = str(value)
return "%s%i %s" % (type_, len(s), s)
def create(id_, name, class_):
send("%s %s %s %s\n" % (enc("create", "s"), enc(id_, "i"), enc(name, "s"), enc(class_, "C")))
type_to_str = {str: "s", int: "i"}
def call(id_, obj, method, *args):
a = []
for arg in args:
c = type_to_str[type(arg)]
a.append(enc(arg, c))
send("%s %s %s %s %s\n" % (enc("call", "s"), enc(id_, "i"), enc(obj, "I"),
enc(method, "s"), " ".join(a)))
def receive(expecting):
in_message = False
current = ""
length = 0
while True:
current += sys.stdin.read(1)
if not in_message:
space = current.find(" ")
if space == -1:
continue
length = int(current[:space])
current = current[space + 1:]
in_message = True
if len(current) >= length:
content = current[:length]
args = parse(content)
if tuple(args[:2]) == expecting:
return args[2:]
current = current[length:]
in_message = False
str_to_type = {"i": int, "s": str}
def parse(text):
args = []
i = 0
while i < len(text):
type_ = text[i]
space = text.find(" ", i)
length = int(text[i + 1:space])
value = text[space + 1:space + 1 + length]
if type_ in str_to_type:
value = str_to_type[type_](value)
elif type_ == "N":
value = None
args.append(value)
i = space + 1 + length + 1
return args
def call_receive(id_, obj, method, *args):
call(id_, obj, method, *args)
return receive(("value", 3))
if __name__ == "__main__":
create(0, "label", "QLabel")
call(1, "label", "setText", 'Hello "World"!')
call(2, "label", "show")
width = int(call_receive(3, "label", "width")[0])
height = int(call_receive(3, "label", "height")[0])
time.sleep(2)
call(4, "label", "resize", width * 2, height * 4)
time.sleep(2)
call(5, "label", "close")
| true | true |
1c47251f1d885a2899627ad8fe90b650f45dcd7a | 420 | py | Python | ex058.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | ex058.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | ex058.py | dsjocimar/python | 5716f46a9fa7f64aa78a39df9c262c5392571340 | [
"MIT"
] | null | null | null | # Exercício 058
from random import randint
# Count every prompt as one attempt, including the very first guess. The
# previous version started the counter at 0, never counted the first guess,
# and patched only the guessed-first-try case with `if tentativas == 0`,
# undercounting every multi-guess game by one.
tentativas = 1
computador = randint(0, 10)
jogador = int(input('TENTE ADIVINHAR QUAL NÚMERO EU ESTOU PENSANDO, DE 0 A 10...'))
print('PROCESSANDO...')
while jogador != computador:
    jogador = int(input('VOCÊ ERROU! TENTE NOVAMENTE!:\n'))
    tentativas += 1
print(f'PARABÉNS! VOCÊ ACERTOU! VOCÊ UTILIZOU DE {tentativas} TENTATIVA(S)')
| 30 | 83 | 0.707143 |
from random import randint
tentativas = 0
computador = randint(0, 10)
jogador = int(input('TENTE ADIVINHAR QUAL NÚMERO EU ESTOU PENSANDO, DE 0 A 10...'))
print('PROCESSANDO...')
while jogador != computador:
jogador = int(input('VOCÊ ERROU! TENTE NOVAMENTE!:\n'))
tentativas += 1
if tentativas == 0:
tentativas = 1
print(f'PARABÉNS! VOCÊ ACERTOU! VOCÊ UTILIZOU DE {tentativas} TENTATIVA(S)')
| true | true |
1c4727047e4e277527b4df35a4017b33843a4678 | 613 | py | Python | runpandas/_testing.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 11 | 2020-12-04T20:43:23.000Z | 2022-03-16T19:19:12.000Z | runpandas/_testing.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 45 | 2020-06-23T02:50:31.000Z | 2022-02-15T16:56:00.000Z | runpandas/_testing.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 4 | 2021-11-11T15:23:04.000Z | 2022-02-02T13:02:12.000Z | """
Utilities for testing purposes.
"""
import wrapt
def skip_on_exception(exp):
    """
    Decorator factory: skip a test when the given exception type is raised.

    Useful when the exception occurs for reasons beyond our control
    (e.g. a flaky 3rd-party API). The returned decorator is
    signature-preserving (built with wrapt).

    Parameters
    ----------
    exp : Exception type (or tuple of types) to convert into a pytest skip.
    """
    # Imported inside the factory so this module can be imported even when
    # pytest is absent; pytest is only needed once the decorator is built.
    from pytest import skip
    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # wrapt supplies the bound instance separately; args/kwargs are
        # forwarded unchanged so the wrapped signature is preserved.
        try:
            return wrapped(*args, **kwargs)
        except exp as e:
            skip(str(e))
    return wrapper
| 21.137931 | 66 | 0.62969 |
import wrapt
def skip_on_exception(exp):
from pytest import skip
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
try:
return wrapped(*args, **kwargs)
except exp as e:
skip(str(e))
return wrapper
| true | true |
1c472708a7a3874db4d2144abf7360285dc39c2d | 1,385 | py | Python | Lib/xml/dom/html/HTMLHtmlElement.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-07-09T20:02:48.000Z | 2021-11-21T20:00:37.000Z | Lib/xml/dom/html/HTMLHtmlElement.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | Lib/xml/dom/html/HTMLHtmlElement.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | ########################################################################
#
# File Name: HTMLHtmlElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLHtmlElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLHtmlElement(HTMLElement):
    """4DOM binding for the HTML ``<HTML>`` root element (auto-generated;
    see the file header's DO-NOT-EDIT notice).

    Exposes the DTD ``VERSION`` attribute via the computed-attribute maps.
    NOTE: this is Python 2 code (``dict.has_key``, ``filter`` assumed to
    return a list) and will not run unmodified on Python 3.
    """
    def __init__(self, ownerDocument, nodeName="HTML"):
        HTMLElement.__init__(self, ownerDocument, nodeName)
    ### Attribute Methods ###
    def _get_version(self):
        return self.getAttribute("VERSION")
    def _set_version(self, value):
        self.setAttribute("VERSION", value)
    ### Attribute Access Mappings ###
    # Extend the parent's computed-attribute tables with "version".
    _readComputedAttrs = HTMLElement._readComputedAttrs.copy()
    _readComputedAttrs.update({
        "version" : _get_version
        })
    _writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
    _writeComputedAttrs.update({
        "version" : _set_version
        })
    # Read-only = readable attrs that have no registered writer.
    _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
                    HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| 28.265306 | 77 | 0.65343 | true | true | |
1c472740677dec56b7aeda2e3690aaa75e2d07c1 | 35,268 | py | Python | sdk/python/feast/registry.py | danilopeixoto/feast | 57d134355364654a2275b477b3b82b149f0779ca | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/registry.py | danilopeixoto/feast | 57d134355364654a2275b477b3b82b149f0779ca | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/registry.py | danilopeixoto/feast | 57d134355364654a2275b477b3b82b149f0779ca | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from collections import defaultdict
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional, Set
from urllib.parse import urlparse
from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
from google.protobuf.json_format import MessageToJson
from proto import Message
from feast.base_feature_view import BaseFeatureView
from feast.entity import Entity
from feast.errors import (
ConflictingFeatureViewNames,
EntityNotFoundException,
FeatureServiceNotFoundException,
FeatureViewNotFoundException,
OnDemandFeatureViewNotFoundException,
SavedDatasetNotFound,
)
from feast.feature_service import FeatureService
from feast.feature_view import FeatureView
from feast.importer import import_class
from feast.infra.infra_object import Infra
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto
from feast.registry_store import NoopRegistryStore
from feast.repo_config import RegistryConfig
from feast.repo_contents import RepoContents
from feast.request_feature_view import RequestFeatureView
from feast.saved_dataset import SavedDataset
REGISTRY_SCHEMA_VERSION = "1"
REGISTRY_STORE_CLASS_FOR_TYPE = {
"GCSRegistryStore": "feast.infra.gcp.GCSRegistryStore",
"S3RegistryStore": "feast.infra.aws.S3RegistryStore",
"LocalRegistryStore": "feast.infra.local.LocalRegistryStore",
}
REGISTRY_STORE_CLASS_FOR_SCHEME = {
"gs": "GCSRegistryStore",
"s3": "S3RegistryStore",
"file": "LocalRegistryStore",
"": "LocalRegistryStore",
}
class FeastObjectType(Enum):
    """The kinds of user-facing objects a Feast registry can hold."""
    ENTITY = "entity"
    FEATURE_VIEW = "feature view"
    ON_DEMAND_FEATURE_VIEW = "on demand feature view"
    REQUEST_FEATURE_VIEW = "request feature view"
    FEATURE_SERVICE = "feature service"
    @staticmethod
    def get_objects_from_registry(
        registry: "Registry", project: str
    ) -> Dict["FeastObjectType", List[Any]]:
        """Return every registered object of each type for ``project``."""
        return {
            FeastObjectType.ENTITY: registry.list_entities(project=project),
            FeastObjectType.FEATURE_VIEW: registry.list_feature_views(project=project),
            FeastObjectType.ON_DEMAND_FEATURE_VIEW: registry.list_on_demand_feature_views(
                project=project
            ),
            FeastObjectType.REQUEST_FEATURE_VIEW: registry.list_request_feature_views(
                project=project
            ),
            FeastObjectType.FEATURE_SERVICE: registry.list_feature_services(
                project=project
            ),
        }
    @staticmethod
    def get_objects_from_repo_contents(
        repo_contents: RepoContents,
    ) -> Dict["FeastObjectType", Set[Any]]:
        """Return the declared objects of each type from parsed repo contents."""
        return {
            FeastObjectType.ENTITY: repo_contents.entities,
            FeastObjectType.FEATURE_VIEW: repo_contents.feature_views,
            FeastObjectType.ON_DEMAND_FEATURE_VIEW: repo_contents.on_demand_feature_views,
            FeastObjectType.REQUEST_FEATURE_VIEW: repo_contents.request_feature_views,
            FeastObjectType.FEATURE_SERVICE: repo_contents.feature_services,
        }
# All FeastObjectType members, in declaration order. list(Enum) iterates the
# members directly — the identity comprehension was an unnecessary copy idiom.
FEAST_OBJECT_TYPES = list(FeastObjectType)
logger = logging.getLogger(__name__)
def get_registry_store_class_from_type(registry_store_type: str):
    """Resolve a registry store class from a short name or a dotted path.

    Short built-in names are expanded via REGISTRY_STORE_CLASS_FOR_TYPE;
    anything else is treated as an already fully qualified "module.Class".
    """
    if not registry_store_type.endswith("RegistryStore"):
        raise Exception('Registry store class name should end with "RegistryStore"')
    qualified_name = REGISTRY_STORE_CLASS_FOR_TYPE.get(
        registry_store_type, registry_store_type
    )
    module_name, registry_store_class_name = qualified_name.rsplit(".", 1)
    return import_class(module_name, registry_store_class_name, "RegistryStore")
def get_registry_store_class_from_scheme(registry_path: str):
    """Resolve a registry store class from the URI scheme of ``registry_path``.

    Raises an Exception for schemes other than file, s3, gs or empty.
    """
    uri = urlparse(registry_path)
    if uri.scheme in REGISTRY_STORE_CLASS_FOR_SCHEME:
        store_type = REGISTRY_STORE_CLASS_FOR_SCHEME[uri.scheme]
        return get_registry_store_class_from_type(store_type)
    raise Exception(
        f"Registry path {registry_path} has unsupported scheme {uri.scheme}. "
        f"Supported schemes are file, s3 and gs."
    )
class Registry:
"""
Registry: A registry allows for the management and persistence of feature definitions and related metadata.
"""
# The cached_registry_proto object is used for both reads and writes. In particular,
# all write operations refresh the cache and modify it in memory; the write must
# then be persisted to the underlying RegistryStore with a call to commit().
cached_registry_proto: Optional[RegistryProto] = None
cached_registry_proto_created: Optional[datetime] = None
cached_registry_proto_ttl: timedelta
    def __init__(
        self, registry_config: Optional[RegistryConfig], repo_path: Optional[Path]
    ):
        """
        Create the Registry object.
        Args:
            registry_config: RegistryConfig object containing the destination path and cache ttl,
            repo_path: Path to the base of the Feast repository
                or where it will be created if it does not exist yet.
        """
        # Guards refreshes of the cached registry proto across threads.
        self._refresh_lock = Lock()
        if registry_config:
            registry_store_type = registry_config.registry_store_type
            registry_path = registry_config.path
            if registry_store_type is None:
                # No explicit store type: infer it from the path's URI scheme
                # (gs -> GCS, s3 -> S3, file/empty -> local).
                cls = get_registry_store_class_from_scheme(registry_path)
            else:
                cls = get_registry_store_class_from_type(str(registry_store_type))
            self._registry_store = cls(registry_config, repo_path)
            # NOTE(review): a missing cache_ttl_seconds becomes 0 — confirm
            # how 0 is interpreted in _get_registry_proto (not in this chunk).
            self.cached_registry_proto_ttl = timedelta(
                seconds=registry_config.cache_ttl_seconds
                if registry_config.cache_ttl_seconds is not None
                else 0
            )
        # When registry_config is None (as in clone()), the caller is expected
        # to set _registry_store and the cache fields itself.
def clone(self) -> "Registry":
new_registry = Registry(None, None)
new_registry.cached_registry_proto_ttl = timedelta(seconds=0)
new_registry.cached_registry_proto = (
self.cached_registry_proto.__deepcopy__()
if self.cached_registry_proto
else RegistryProto()
)
new_registry.cached_registry_proto_created = datetime.utcnow()
new_registry._registry_store = NoopRegistryStore()
return new_registry
    def _initialize_registry(self):
        """Explicitly initializes the registry with an empty proto if it doesn't exist."""
        try:
            self._get_registry_proto()
        except FileNotFoundError:
            # No registry file yet: persist an empty proto stamped with the
            # current schema version so later reads succeed.
            registry_proto = RegistryProto()
            registry_proto.registry_schema_version = REGISTRY_SCHEMA_VERSION
            self._registry_store.update_registry_proto(registry_proto)
    def update_infra(self, infra: Infra, project: str, commit: bool = True):
        """
        Updates the stored Infra object.
        Args:
            infra: The new Infra object to be stored.
            project: Feast project that the Infra object refers to
            commit: Whether the change should be persisted immediately
        """
        self._prepare_registry_for_changes()
        assert self.cached_registry_proto
        # Replace the registry-wide Infra message wholesale.
        # NOTE(review): `project` is accepted but unused here — one Infra
        # message is kept on the registry proto regardless of project; confirm
        # intended.
        self.cached_registry_proto.infra.CopyFrom(infra.to_proto())
        if commit:
            self.commit()
def get_infra(self, project: str, allow_cache: bool = False) -> Infra:
"""
Retrieves the stored Infra object.
Args:
project: Feast project that the Infra object refers to
allow_cache: Whether to allow returning this entity from a cached registry
Returns:
The stored Infra object.
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
return Infra.from_proto(registry_proto.infra)
    def apply_entity(self, entity: Entity, project: str, commit: bool = True):
        """
        Registers a single entity with Feast
        Args:
            entity: Entity that will be registered
            project: Feast project that this entity belongs to
            commit: Whether the change should be persisted immediately
        """
        entity.is_valid()
        # Stamp creation/update times on the caller's object before encoding.
        now = datetime.utcnow()
        if not entity.created_timestamp:
            entity.created_timestamp = now
        entity.last_updated_timestamp = now
        entity_proto = entity.to_proto()
        entity_proto.spec.project = project
        self._prepare_registry_for_changes()
        assert self.cached_registry_proto
        # Upsert: remove an existing entity with the same (name, project),
        # then append the new definition. The break is required — deleting
        # while enumerating shifts the remaining indices.
        for idx, existing_entity_proto in enumerate(
            self.cached_registry_proto.entities
        ):
            if (
                existing_entity_proto.spec.name == entity_proto.spec.name
                and existing_entity_proto.spec.project == project
            ):
                del self.cached_registry_proto.entities[idx]
                break
        self.cached_registry_proto.entities.append(entity_proto)
        if commit:
            self.commit()
def list_entities(self, project: str, allow_cache: bool = False) -> List[Entity]:
"""
Retrieve a list of entities from the registry
Args:
allow_cache: Whether to allow returning entities from a cached registry
project: Filter entities based on project name
Returns:
List of entities
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
entities = []
for entity_proto in registry_proto.entities:
if entity_proto.spec.project == project:
entities.append(Entity.from_proto(entity_proto))
return entities
def apply_feature_service(
self, feature_service: FeatureService, project: str, commit: bool = True
):
"""
Registers a single feature service with Feast
Args:
feature_service: A feature service that will be registered
project: Feast project that this entity belongs to
"""
now = datetime.utcnow()
if not feature_service.created_timestamp:
feature_service.created_timestamp = now
feature_service.last_updated_timestamp = now
feature_service_proto = feature_service.to_proto()
feature_service_proto.spec.project = project
registry = self._prepare_registry_for_changes()
for idx, existing_feature_service_proto in enumerate(registry.feature_services):
if (
existing_feature_service_proto.spec.name
== feature_service_proto.spec.name
and existing_feature_service_proto.spec.project == project
):
del registry.feature_services[idx]
registry.feature_services.append(feature_service_proto)
if commit:
self.commit()
def list_feature_services(
self, project: str, allow_cache: bool = False
) -> List[FeatureService]:
"""
Retrieve a list of feature services from the registry
Args:
allow_cache: Whether to allow returning entities from a cached registry
project: Filter entities based on project name
Returns:
List of feature services
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
feature_services = []
for feature_service_proto in registry.feature_services:
if feature_service_proto.spec.project == project:
feature_services.append(
FeatureService.from_proto(feature_service_proto)
)
return feature_services
def get_feature_service(
self, name: str, project: str, allow_cache: bool = False
) -> FeatureService:
"""
Retrieves a feature service.
Args:
name: Name of feature service
project: Feast project that this feature service belongs to
allow_cache: Whether to allow returning this feature service from a cached registry
Returns:
Returns either the specified feature service, or raises an exception if
none is found
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
for feature_service_proto in registry.feature_services:
if (
feature_service_proto.spec.project == project
and feature_service_proto.spec.name == name
):
return FeatureService.from_proto(feature_service_proto)
raise FeatureServiceNotFoundException(name, project=project)
def get_entity(self, name: str, project: str, allow_cache: bool = False) -> Entity:
"""
Retrieves an entity.
Args:
name: Name of entity
project: Feast project that this entity belongs to
allow_cache: Whether to allow returning this entity from a cached registry
Returns:
Returns either the specified entity, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for entity_proto in registry_proto.entities:
if entity_proto.spec.name == name and entity_proto.spec.project == project:
return Entity.from_proto(entity_proto)
raise EntityNotFoundException(name, project=project)
    def apply_feature_view(
        self, feature_view: BaseFeatureView, project: str, commit: bool = True
    ):
        """
        Registers a single feature view with Feast
        Args:
            feature_view: Feature view that will be registered
            project: Feast project that this feature view belongs to
            commit: Whether the change should be persisted immediately
        """
        feature_view.ensure_valid()
        # Stamp creation/update times on the caller's object before encoding.
        now = datetime.utcnow()
        if not feature_view.created_timestamp:
            feature_view.created_timestamp = now
        feature_view.last_updated_timestamp = now
        feature_view_proto = feature_view.to_proto()
        feature_view_proto.spec.project = project
        self._prepare_registry_for_changes()
        assert self.cached_registry_proto
        self._check_conflicting_feature_view_names(feature_view)
        # Each concrete feature-view type lives in its own repeated field on
        # the registry proto; pick the matching container.
        existing_feature_views_of_same_type: RepeatedCompositeFieldContainer
        if isinstance(feature_view, FeatureView):
            existing_feature_views_of_same_type = (
                self.cached_registry_proto.feature_views
            )
        elif isinstance(feature_view, OnDemandFeatureView):
            existing_feature_views_of_same_type = (
                self.cached_registry_proto.on_demand_feature_views
            )
        elif isinstance(feature_view, RequestFeatureView):
            existing_feature_views_of_same_type = (
                self.cached_registry_proto.request_feature_views
            )
        else:
            raise ValueError(f"Unexpected feature view type: {type(feature_view)}")
        for idx, existing_feature_view_proto in enumerate(
            existing_feature_views_of_same_type
        ):
            if (
                existing_feature_view_proto.spec.name == feature_view_proto.spec.name
                and existing_feature_view_proto.spec.project == project
            ):
                if (
                    feature_view.__class__.from_proto(existing_feature_view_proto)
                    == feature_view
                ):
                    # Identical definition already registered: no-op. This
                    # also skips commit() even when commit=True.
                    # NOTE(review): last_updated_timestamp was refreshed above
                    # before this comparison, so presumably equality ignores
                    # timestamps — confirm.
                    return
                else:
                    # Stale definition: remove it so the new one replaces it.
                    # The break is required since deletion shifts indices.
                    del existing_feature_views_of_same_type[idx]
                    break
        existing_feature_views_of_same_type.append(feature_view_proto)
        if commit:
            self.commit()
def list_on_demand_feature_views(
self, project: str, allow_cache: bool = False
) -> List[OnDemandFeatureView]:
"""
Retrieve a list of on demand feature views from the registry
Args:
project: Filter on demand feature views based on project name
allow_cache: Whether to allow returning on demand feature views from a cached registry
Returns:
List of on demand feature views
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
on_demand_feature_views = []
for on_demand_feature_view in registry.on_demand_feature_views:
if on_demand_feature_view.spec.project == project:
on_demand_feature_views.append(
OnDemandFeatureView.from_proto(on_demand_feature_view)
)
return on_demand_feature_views
def get_on_demand_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> OnDemandFeatureView:
"""
Retrieves an on demand feature view.
Args:
name: Name of on demand feature view
project: Feast project that this on demand feature belongs to
Returns:
Returns either the specified on demand feature view, or raises an exception if
none is found
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
for on_demand_feature_view in registry.on_demand_feature_views:
if (
on_demand_feature_view.spec.project == project
and on_demand_feature_view.spec.name == name
):
return OnDemandFeatureView.from_proto(on_demand_feature_view)
raise OnDemandFeatureViewNotFoundException(name, project=project)
    def apply_materialization(
        self,
        feature_view: FeatureView,
        project: str,
        start_date: datetime,
        end_date: datetime,
        commit: bool = True,
    ):
        """
        Updates materialization intervals tracked for a single feature view in Feast

        Args:
            feature_view: Feature view that will be updated with an additional materialization interval tracked
            project: Feast project that this feature view belongs to
            start_date (datetime): Start date of the materialization interval to track
            end_date (datetime): End date of the materialization interval to track
            commit: Whether the change should be persisted immediately

        Raises:
            FeatureViewNotFoundException: no feature view with this name exists
                in the project.
        """
        self._prepare_registry_for_changes()
        assert self.cached_registry_proto
        for idx, existing_feature_view_proto in enumerate(
            self.cached_registry_proto.feature_views
        ):
            if (
                existing_feature_view_proto.spec.name == feature_view.name
                and existing_feature_view_proto.spec.project == project
            ):
                # Rebuild the stored feature view with the new interval appended,
                # bump its last-updated timestamp, then swap the proto in place
                # (delete + append keeps exactly one entry per view name).
                existing_feature_view = FeatureView.from_proto(
                    existing_feature_view_proto
                )
                existing_feature_view.materialization_intervals.append(
                    (start_date, end_date)
                )
                existing_feature_view.last_updated_timestamp = datetime.utcnow()
                feature_view_proto = existing_feature_view.to_proto()
                feature_view_proto.spec.project = project
                del self.cached_registry_proto.feature_views[idx]
                self.cached_registry_proto.feature_views.append(feature_view_proto)
                if commit:
                    self.commit()
                return
        raise FeatureViewNotFoundException(feature_view.name, project)
def list_feature_views(
self, project: str, allow_cache: bool = False
) -> List[FeatureView]:
"""
Retrieve a list of feature views from the registry
Args:
allow_cache: Allow returning feature views from the cached registry
project: Filter feature views based on project name
Returns:
List of feature views
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[FeatureView] = []
for feature_view_proto in registry_proto.feature_views:
if feature_view_proto.spec.project == project:
feature_views.append(FeatureView.from_proto(feature_view_proto))
return feature_views
def list_request_feature_views(
self, project: str, allow_cache: bool = False
) -> List[RequestFeatureView]:
"""
Retrieve a list of request feature views from the registry
Args:
allow_cache: Allow returning feature views from the cached registry
project: Filter feature views based on project name
Returns:
List of feature views
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[RequestFeatureView] = []
for request_feature_view_proto in registry_proto.request_feature_views:
if request_feature_view_proto.spec.project == project:
feature_views.append(
RequestFeatureView.from_proto(request_feature_view_proto)
)
return feature_views
def get_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> FeatureView:
"""
Retrieves a feature view.
Args:
name: Name of feature view
project: Feast project that this feature view belongs to
allow_cache: Allow returning feature view from the cached registry
Returns:
Returns either the specified feature view, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for feature_view_proto in registry_proto.feature_views:
if (
feature_view_proto.spec.name == name
and feature_view_proto.spec.project == project
):
return FeatureView.from_proto(feature_view_proto)
raise FeatureViewNotFoundException(name, project)
def delete_feature_service(self, name: str, project: str, commit: bool = True):
"""
Deletes a feature service or raises an exception if not found.
Args:
name: Name of feature service
project: Feast project that this feature service belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, feature_service_proto in enumerate(
self.cached_registry_proto.feature_services
):
if (
feature_service_proto.spec.name == name
and feature_service_proto.spec.project == project
):
del self.cached_registry_proto.feature_services[idx]
if commit:
self.commit()
return
raise FeatureServiceNotFoundException(name, project)
def delete_feature_view(self, name: str, project: str, commit: bool = True):
"""
Deletes a feature view or raises an exception if not found.
Args:
name: Name of feature view
project: Feast project that this feature view belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_feature_view_proto in enumerate(
self.cached_registry_proto.feature_views
):
if (
existing_feature_view_proto.spec.name == name
and existing_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.feature_views[idx]
if commit:
self.commit()
return
for idx, existing_request_feature_view_proto in enumerate(
self.cached_registry_proto.request_feature_views
):
if (
existing_request_feature_view_proto.spec.name == name
and existing_request_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.request_feature_views[idx]
if commit:
self.commit()
return
for idx, existing_on_demand_feature_view_proto in enumerate(
self.cached_registry_proto.on_demand_feature_views
):
if (
existing_on_demand_feature_view_proto.spec.name == name
and existing_on_demand_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.on_demand_feature_views[idx]
if commit:
self.commit()
return
raise FeatureViewNotFoundException(name, project)
def delete_entity(self, name: str, project: str, commit: bool = True):
"""
Deletes an entity or raises an exception if not found.
Args:
name: Name of entity
project: Feast project that this entity belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_entity_proto in enumerate(
self.cached_registry_proto.entities
):
if (
existing_entity_proto.spec.name == name
and existing_entity_proto.spec.project == project
):
del self.cached_registry_proto.entities[idx]
if commit:
self.commit()
return
raise EntityNotFoundException(name, project)
def apply_saved_dataset(
self, saved_dataset: SavedDataset, project: str, commit: bool = True
):
"""
Registers a single entity with Feast
Args:
saved_dataset: SavedDataset that will be added / updated to registry
project: Feast project that this dataset belongs to
commit: Whether the change should be persisted immediately
"""
now = datetime.utcnow()
if not saved_dataset.created_timestamp:
saved_dataset.created_timestamp = now
saved_dataset.last_updated_timestamp = now
saved_dataset_proto = saved_dataset.to_proto()
saved_dataset_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_saved_dataset_proto in enumerate(
self.cached_registry_proto.saved_datasets
):
if (
existing_saved_dataset_proto.spec.name == saved_dataset_proto.spec.name
and existing_saved_dataset_proto.spec.project == project
):
del self.cached_registry_proto.saved_datasets[idx]
break
self.cached_registry_proto.saved_datasets.append(saved_dataset_proto)
if commit:
self.commit()
def get_saved_dataset(
self, name: str, project: str, allow_cache: bool = False
) -> SavedDataset:
"""
Retrieves a saved dataset.
Args:
name: Name of dataset
project: Feast project that this dataset belongs to
allow_cache: Whether to allow returning this dataset from a cached registry
Returns:
Returns either the specified SavedDataset, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for saved_dataset in registry_proto.saved_datasets:
if (
saved_dataset.spec.name == name
and saved_dataset.spec.project == project
):
return SavedDataset.from_proto(saved_dataset)
raise SavedDatasetNotFound(name, project=project)
def list_saved_datasets(
self, project: str, allow_cache: bool = False
) -> List[SavedDataset]:
"""
Retrieves a list of all saved datasets in specified project
Args:
project: Feast project
allow_cache: Whether to allow returning this dataset from a cached registry
Returns:
Returns the list of SavedDatasets
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
return [
SavedDataset.from_proto(saved_dataset)
for saved_dataset in registry_proto.saved_datasets
if saved_dataset.spec.project == project
]
    def commit(self):
        """Commits the state of the registry cache to the remote registry store."""
        # A never-populated cache means there is nothing to persist.
        if self.cached_registry_proto:
            self._registry_store.update_registry_proto(self.cached_registry_proto)
    def refresh(self):
        """Refreshes the state of the registry cache by fetching the registry state from the remote registry store."""
        # allow_cache=False forces a remote fetch and repopulates the cache.
        self._get_registry_proto(allow_cache=False)
    def teardown(self):
        """Tears down (removes) the registry."""
        # Removal of the persisted registry is delegated to the backing store.
        self._registry_store.teardown()
def to_dict(self, project: str) -> Dict[str, List[Any]]:
"""Returns a dictionary representation of the registry contents for the specified project.
For each list in the dictionary, the elements are sorted by name, so this
method can be used to compare two registries.
Args:
project: Feast project to convert to a dict
"""
registry_dict = defaultdict(list)
for entity in sorted(
self.list_entities(project=project), key=lambda entity: entity.name
):
registry_dict["entities"].append(
self._message_to_sorted_dict(entity.to_proto())
)
for feature_view in sorted(
self.list_feature_views(project=project),
key=lambda feature_view: feature_view.name,
):
registry_dict["featureViews"].append(
self._message_to_sorted_dict(feature_view.to_proto())
)
for feature_service in sorted(
self.list_feature_services(project=project),
key=lambda feature_service: feature_service.name,
):
registry_dict["featureServices"].append(
self._message_to_sorted_dict(feature_service.to_proto())
)
for on_demand_feature_view in sorted(
self.list_on_demand_feature_views(project=project),
key=lambda on_demand_feature_view: on_demand_feature_view.name,
):
registry_dict["onDemandFeatureViews"].append(
self._message_to_sorted_dict(on_demand_feature_view.to_proto())
)
for request_feature_view in sorted(
self.list_request_feature_views(project=project),
key=lambda request_feature_view: request_feature_view.name,
):
registry_dict["requestFeatureViews"].append(
self._message_to_sorted_dict(request_feature_view.to_proto())
)
for saved_dataset in sorted(
self.list_saved_datasets(project=project), key=lambda item: item.name
):
registry_dict["savedDatasets"].append(
self._message_to_sorted_dict(saved_dataset.to_proto())
)
for infra_object in sorted(self.get_infra(project=project).infra_objects):
registry_dict["infra"].append(
self._message_to_sorted_dict(infra_object.to_proto())
)
return registry_dict
    @staticmethod
    def _message_to_sorted_dict(message: Message) -> Dict[str, Any]:
        """Convert a protobuf message to a dict with deterministically sorted keys."""
        return json.loads(MessageToJson(message, sort_keys=True))
    def _prepare_registry_for_changes(self):
        """Prepares the Registry for changes by refreshing the cache if necessary."""
        try:
            self._get_registry_proto(allow_cache=True)
        except FileNotFoundError:
            # The registry does not exist in the store yet: start from an empty
            # in-memory proto so the caller's mutation (followed by commit())
            # creates it.
            registry_proto = RegistryProto()
            registry_proto.registry_schema_version = REGISTRY_SCHEMA_VERSION
            self.cached_registry_proto = registry_proto
            self.cached_registry_proto_created = datetime.utcnow()
        return self.cached_registry_proto
    def _get_registry_proto(self, allow_cache: bool = False) -> RegistryProto:
        """Returns the cached or remote registry state

        Args:
            allow_cache: Whether to allow the use of the registry cache when fetching the RegistryProto

        Returns: Returns a RegistryProto object which represents the state of the registry
        """
        with self._refresh_lock:
            # The cache is unusable when it was never populated, and expired
            # once a positive TTL has elapsed since it was created.
            expired = (
                self.cached_registry_proto is None
                or self.cached_registry_proto_created is None
            ) or (
                self.cached_registry_proto_ttl.total_seconds()
                > 0  # 0 ttl means infinity
                and (
                    datetime.utcnow()
                    > (
                        self.cached_registry_proto_created
                        + self.cached_registry_proto_ttl
                    )
                )
            )
            if allow_cache and not expired:
                assert isinstance(self.cached_registry_proto, RegistryProto)
                return self.cached_registry_proto
            # Cache miss or bypass: fetch the authoritative state and refresh
            # the cache (and its creation timestamp) as a side effect.
            registry_proto = self._registry_store.get_registry_proto()
            self.cached_registry_proto = registry_proto
            self.cached_registry_proto_created = datetime.utcnow()
            return registry_proto
    def _check_conflicting_feature_view_names(self, feature_view: BaseFeatureView):
        """Raise ConflictingFeatureViewNames when an already-registered feature
        view of a different proto type uses the same name as ``feature_view``."""
        name_to_fv_protos = self._existing_feature_view_names_to_fvs()
        if feature_view.name in name_to_fv_protos:
            if not isinstance(
                name_to_fv_protos.get(feature_view.name), feature_view.proto_class
            ):
                raise ConflictingFeatureViewNames(feature_view.name)
    def _existing_feature_view_names_to_fvs(self) -> Dict[str, Message]:
        """Map feature view name -> proto across all three feature view
        containers. On a name collision, later entries in the merge below win:
        request feature views over regular ones over on-demand ones."""
        assert self.cached_registry_proto
        odfvs = {
            fv.spec.name: fv
            for fv in self.cached_registry_proto.on_demand_feature_views
        }
        fvs = {fv.spec.name: fv for fv in self.cached_registry_proto.feature_views}
        request_fvs = {
            fv.spec.name: fv for fv in self.cached_registry_proto.request_feature_views
        }
        return {**odfvs, **fvs, **request_fvs}
| 38.671053 | 118 | 0.651043 |
import json
import logging
from collections import defaultdict
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional, Set
from urllib.parse import urlparse
from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
from google.protobuf.json_format import MessageToJson
from proto import Message
from feast.base_feature_view import BaseFeatureView
from feast.entity import Entity
from feast.errors import (
ConflictingFeatureViewNames,
EntityNotFoundException,
FeatureServiceNotFoundException,
FeatureViewNotFoundException,
OnDemandFeatureViewNotFoundException,
SavedDatasetNotFound,
)
from feast.feature_service import FeatureService
from feast.feature_view import FeatureView
from feast.importer import import_class
from feast.infra.infra_object import Infra
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto
from feast.registry_store import NoopRegistryStore
from feast.repo_config import RegistryConfig
from feast.repo_contents import RepoContents
from feast.request_feature_view import RequestFeatureView
from feast.saved_dataset import SavedDataset
# Schema version stamped into newly created registries
# (RegistryProto.registry_schema_version).
REGISTRY_SCHEMA_VERSION = "1"
# Short registry-store class names -> fully qualified import paths.
REGISTRY_STORE_CLASS_FOR_TYPE = {
    "GCSRegistryStore": "feast.infra.gcp.GCSRegistryStore",
    "S3RegistryStore": "feast.infra.aws.S3RegistryStore",
    "LocalRegistryStore": "feast.infra.local.LocalRegistryStore",
}
# Registry path URI scheme -> store class name ("" covers bare local paths).
REGISTRY_STORE_CLASS_FOR_SCHEME = {
    "gs": "GCSRegistryStore",
    "s3": "S3RegistryStore",
    "file": "LocalRegistryStore",
    "": "LocalRegistryStore",
}
class FeastObjectType(Enum):
    """Kinds of Feast objects that can be registered, with helpers that gather
    all objects of every kind from a registry or from parsed repo contents."""
    ENTITY = "entity"
    FEATURE_VIEW = "feature view"
    ON_DEMAND_FEATURE_VIEW = "on demand feature view"
    REQUEST_FEATURE_VIEW = "request feature view"
    FEATURE_SERVICE = "feature service"
    @staticmethod
    def get_objects_from_registry(
        registry: "Registry", project: str
    ) -> Dict["FeastObjectType", List[Any]]:
        """Fetch all objects registered under ``project``, keyed by object type."""
        return {
            FeastObjectType.ENTITY: registry.list_entities(project=project),
            FeastObjectType.FEATURE_VIEW: registry.list_feature_views(project=project),
            FeastObjectType.ON_DEMAND_FEATURE_VIEW: registry.list_on_demand_feature_views(
                project=project
            ),
            FeastObjectType.REQUEST_FEATURE_VIEW: registry.list_request_feature_views(
                project=project
            ),
            FeastObjectType.FEATURE_SERVICE: registry.list_feature_services(
                project=project
            ),
        }
    @staticmethod
    def get_objects_from_repo_contents(
        repo_contents: RepoContents,
    ) -> Dict["FeastObjectType", Set[Any]]:
        """Group the declared objects of ``repo_contents`` by object type."""
        return {
            FeastObjectType.ENTITY: repo_contents.entities,
            FeastObjectType.FEATURE_VIEW: repo_contents.feature_views,
            FeastObjectType.ON_DEMAND_FEATURE_VIEW: repo_contents.on_demand_feature_views,
            FeastObjectType.REQUEST_FEATURE_VIEW: repo_contents.request_feature_views,
            FeastObjectType.FEATURE_SERVICE: repo_contents.feature_services,
        }
# All FeastObjectType members; Enum iteration yields them in definition order,
# so list(...) is equivalent to (and clearer than) the identity comprehension.
FEAST_OBJECT_TYPES = list(FeastObjectType)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
def get_registry_store_class_from_type(registry_store_type: str):
    """Resolve a registry store class from either a short class name (looked up
    in REGISTRY_STORE_CLASS_FOR_TYPE) or a fully qualified dotted path.

    Raises:
        Exception: the provided name does not end with "RegistryStore".
    """
    if not registry_store_type.endswith("RegistryStore"):
        raise Exception('Registry store class name should end with "RegistryStore"')
    # Known short names map to their fully qualified paths; anything else is
    # assumed to already be fully qualified.
    qualified_name = REGISTRY_STORE_CLASS_FOR_TYPE.get(
        registry_store_type, registry_store_type
    )
    module_name, registry_store_class_name = qualified_name.rsplit(".", 1)
    return import_class(module_name, registry_store_class_name, "RegistryStore")
def get_registry_store_class_from_scheme(registry_path: str):
    """Resolve a registry store class from the URI scheme of ``registry_path``.

    Raises:
        Exception: the path's scheme is not one of the supported schemes.
    """
    uri = urlparse(registry_path)
    store_type = REGISTRY_STORE_CLASS_FOR_SCHEME.get(uri.scheme)
    if store_type is None:
        raise Exception(
            f"Registry path {registry_path} has unsupported scheme {uri.scheme}. "
            f"Supported schemes are file, s3 and gs."
        )
    return get_registry_store_class_from_type(store_type)
class Registry:
cached_registry_proto: Optional[RegistryProto] = None
cached_registry_proto_created: Optional[datetime] = None
cached_registry_proto_ttl: timedelta
def __init__(
self, registry_config: Optional[RegistryConfig], repo_path: Optional[Path]
):
self._refresh_lock = Lock()
if registry_config:
registry_store_type = registry_config.registry_store_type
registry_path = registry_config.path
if registry_store_type is None:
cls = get_registry_store_class_from_scheme(registry_path)
else:
cls = get_registry_store_class_from_type(str(registry_store_type))
self._registry_store = cls(registry_config, repo_path)
self.cached_registry_proto_ttl = timedelta(
seconds=registry_config.cache_ttl_seconds
if registry_config.cache_ttl_seconds is not None
else 0
)
def clone(self) -> "Registry":
new_registry = Registry(None, None)
new_registry.cached_registry_proto_ttl = timedelta(seconds=0)
new_registry.cached_registry_proto = (
self.cached_registry_proto.__deepcopy__()
if self.cached_registry_proto
else RegistryProto()
)
new_registry.cached_registry_proto_created = datetime.utcnow()
new_registry._registry_store = NoopRegistryStore()
return new_registry
def _initialize_registry(self):
try:
self._get_registry_proto()
except FileNotFoundError:
registry_proto = RegistryProto()
registry_proto.registry_schema_version = REGISTRY_SCHEMA_VERSION
self._registry_store.update_registry_proto(registry_proto)
def update_infra(self, infra: Infra, project: str, commit: bool = True):
self._prepare_registry_for_changes()
assert self.cached_registry_proto
self.cached_registry_proto.infra.CopyFrom(infra.to_proto())
if commit:
self.commit()
def get_infra(self, project: str, allow_cache: bool = False) -> Infra:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
return Infra.from_proto(registry_proto.infra)
def apply_entity(self, entity: Entity, project: str, commit: bool = True):
entity.is_valid()
now = datetime.utcnow()
if not entity.created_timestamp:
entity.created_timestamp = now
entity.last_updated_timestamp = now
entity_proto = entity.to_proto()
entity_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_entity_proto in enumerate(
self.cached_registry_proto.entities
):
if (
existing_entity_proto.spec.name == entity_proto.spec.name
and existing_entity_proto.spec.project == project
):
del self.cached_registry_proto.entities[idx]
break
self.cached_registry_proto.entities.append(entity_proto)
if commit:
self.commit()
def list_entities(self, project: str, allow_cache: bool = False) -> List[Entity]:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
entities = []
for entity_proto in registry_proto.entities:
if entity_proto.spec.project == project:
entities.append(Entity.from_proto(entity_proto))
return entities
def apply_feature_service(
self, feature_service: FeatureService, project: str, commit: bool = True
):
now = datetime.utcnow()
if not feature_service.created_timestamp:
feature_service.created_timestamp = now
feature_service.last_updated_timestamp = now
feature_service_proto = feature_service.to_proto()
feature_service_proto.spec.project = project
registry = self._prepare_registry_for_changes()
for idx, existing_feature_service_proto in enumerate(registry.feature_services):
if (
existing_feature_service_proto.spec.name
== feature_service_proto.spec.name
and existing_feature_service_proto.spec.project == project
):
del registry.feature_services[idx]
registry.feature_services.append(feature_service_proto)
if commit:
self.commit()
def list_feature_services(
self, project: str, allow_cache: bool = False
) -> List[FeatureService]:
registry = self._get_registry_proto(allow_cache=allow_cache)
feature_services = []
for feature_service_proto in registry.feature_services:
if feature_service_proto.spec.project == project:
feature_services.append(
FeatureService.from_proto(feature_service_proto)
)
return feature_services
def get_feature_service(
self, name: str, project: str, allow_cache: bool = False
) -> FeatureService:
registry = self._get_registry_proto(allow_cache=allow_cache)
for feature_service_proto in registry.feature_services:
if (
feature_service_proto.spec.project == project
and feature_service_proto.spec.name == name
):
return FeatureService.from_proto(feature_service_proto)
raise FeatureServiceNotFoundException(name, project=project)
def get_entity(self, name: str, project: str, allow_cache: bool = False) -> Entity:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for entity_proto in registry_proto.entities:
if entity_proto.spec.name == name and entity_proto.spec.project == project:
return Entity.from_proto(entity_proto)
raise EntityNotFoundException(name, project=project)
def apply_feature_view(
self, feature_view: BaseFeatureView, project: str, commit: bool = True
):
feature_view.ensure_valid()
now = datetime.utcnow()
if not feature_view.created_timestamp:
feature_view.created_timestamp = now
feature_view.last_updated_timestamp = now
feature_view_proto = feature_view.to_proto()
feature_view_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
self._check_conflicting_feature_view_names(feature_view)
existing_feature_views_of_same_type: RepeatedCompositeFieldContainer
if isinstance(feature_view, FeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.feature_views
)
elif isinstance(feature_view, OnDemandFeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.on_demand_feature_views
)
elif isinstance(feature_view, RequestFeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.request_feature_views
)
else:
raise ValueError(f"Unexpected feature view type: {type(feature_view)}")
for idx, existing_feature_view_proto in enumerate(
existing_feature_views_of_same_type
):
if (
existing_feature_view_proto.spec.name == feature_view_proto.spec.name
and existing_feature_view_proto.spec.project == project
):
if (
feature_view.__class__.from_proto(existing_feature_view_proto)
== feature_view
):
return
else:
del existing_feature_views_of_same_type[idx]
break
existing_feature_views_of_same_type.append(feature_view_proto)
if commit:
self.commit()
def list_on_demand_feature_views(
self, project: str, allow_cache: bool = False
) -> List[OnDemandFeatureView]:
registry = self._get_registry_proto(allow_cache=allow_cache)
on_demand_feature_views = []
for on_demand_feature_view in registry.on_demand_feature_views:
if on_demand_feature_view.spec.project == project:
on_demand_feature_views.append(
OnDemandFeatureView.from_proto(on_demand_feature_view)
)
return on_demand_feature_views
def get_on_demand_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> OnDemandFeatureView:
registry = self._get_registry_proto(allow_cache=allow_cache)
for on_demand_feature_view in registry.on_demand_feature_views:
if (
on_demand_feature_view.spec.project == project
and on_demand_feature_view.spec.name == name
):
return OnDemandFeatureView.from_proto(on_demand_feature_view)
raise OnDemandFeatureViewNotFoundException(name, project=project)
def apply_materialization(
self,
feature_view: FeatureView,
project: str,
start_date: datetime,
end_date: datetime,
commit: bool = True,
):
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_feature_view_proto in enumerate(
self.cached_registry_proto.feature_views
):
if (
existing_feature_view_proto.spec.name == feature_view.name
and existing_feature_view_proto.spec.project == project
):
existing_feature_view = FeatureView.from_proto(
existing_feature_view_proto
)
existing_feature_view.materialization_intervals.append(
(start_date, end_date)
)
existing_feature_view.last_updated_timestamp = datetime.utcnow()
feature_view_proto = existing_feature_view.to_proto()
feature_view_proto.spec.project = project
del self.cached_registry_proto.feature_views[idx]
self.cached_registry_proto.feature_views.append(feature_view_proto)
if commit:
self.commit()
return
raise FeatureViewNotFoundException(feature_view.name, project)
def list_feature_views(
self, project: str, allow_cache: bool = False
) -> List[FeatureView]:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[FeatureView] = []
for feature_view_proto in registry_proto.feature_views:
if feature_view_proto.spec.project == project:
feature_views.append(FeatureView.from_proto(feature_view_proto))
return feature_views
def list_request_feature_views(
self, project: str, allow_cache: bool = False
) -> List[RequestFeatureView]:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[RequestFeatureView] = []
for request_feature_view_proto in registry_proto.request_feature_views:
if request_feature_view_proto.spec.project == project:
feature_views.append(
RequestFeatureView.from_proto(request_feature_view_proto)
)
return feature_views
def get_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> FeatureView:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for feature_view_proto in registry_proto.feature_views:
if (
feature_view_proto.spec.name == name
and feature_view_proto.spec.project == project
):
return FeatureView.from_proto(feature_view_proto)
raise FeatureViewNotFoundException(name, project)
def delete_feature_service(self, name: str, project: str, commit: bool = True):
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, feature_service_proto in enumerate(
self.cached_registry_proto.feature_services
):
if (
feature_service_proto.spec.name == name
and feature_service_proto.spec.project == project
):
del self.cached_registry_proto.feature_services[idx]
if commit:
self.commit()
return
raise FeatureServiceNotFoundException(name, project)
def delete_feature_view(self, name: str, project: str, commit: bool = True):
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_feature_view_proto in enumerate(
self.cached_registry_proto.feature_views
):
if (
existing_feature_view_proto.spec.name == name
and existing_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.feature_views[idx]
if commit:
self.commit()
return
for idx, existing_request_feature_view_proto in enumerate(
self.cached_registry_proto.request_feature_views
):
if (
existing_request_feature_view_proto.spec.name == name
and existing_request_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.request_feature_views[idx]
if commit:
self.commit()
return
for idx, existing_on_demand_feature_view_proto in enumerate(
self.cached_registry_proto.on_demand_feature_views
):
if (
existing_on_demand_feature_view_proto.spec.name == name
and existing_on_demand_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.on_demand_feature_views[idx]
if commit:
self.commit()
return
raise FeatureViewNotFoundException(name, project)
def delete_entity(self, name: str, project: str, commit: bool = True):
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_entity_proto in enumerate(
self.cached_registry_proto.entities
):
if (
existing_entity_proto.spec.name == name
and existing_entity_proto.spec.project == project
):
del self.cached_registry_proto.entities[idx]
if commit:
self.commit()
return
raise EntityNotFoundException(name, project)
def apply_saved_dataset(
self, saved_dataset: SavedDataset, project: str, commit: bool = True
):
now = datetime.utcnow()
if not saved_dataset.created_timestamp:
saved_dataset.created_timestamp = now
saved_dataset.last_updated_timestamp = now
saved_dataset_proto = saved_dataset.to_proto()
saved_dataset_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_saved_dataset_proto in enumerate(
self.cached_registry_proto.saved_datasets
):
if (
existing_saved_dataset_proto.spec.name == saved_dataset_proto.spec.name
and existing_saved_dataset_proto.spec.project == project
):
del self.cached_registry_proto.saved_datasets[idx]
break
self.cached_registry_proto.saved_datasets.append(saved_dataset_proto)
if commit:
self.commit()
def get_saved_dataset(
self, name: str, project: str, allow_cache: bool = False
) -> SavedDataset:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for saved_dataset in registry_proto.saved_datasets:
if (
saved_dataset.spec.name == name
and saved_dataset.spec.project == project
):
return SavedDataset.from_proto(saved_dataset)
raise SavedDatasetNotFound(name, project=project)
def list_saved_datasets(
self, project: str, allow_cache: bool = False
) -> List[SavedDataset]:
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
return [
SavedDataset.from_proto(saved_dataset)
for saved_dataset in registry_proto.saved_datasets
if saved_dataset.spec.project == project
]
def commit(self):
if self.cached_registry_proto:
self._registry_store.update_registry_proto(self.cached_registry_proto)
def refresh(self):
self._get_registry_proto(allow_cache=False)
def teardown(self):
self._registry_store.teardown()
def to_dict(self, project: str) -> Dict[str, List[Any]]:
registry_dict = defaultdict(list)
for entity in sorted(
self.list_entities(project=project), key=lambda entity: entity.name
):
registry_dict["entities"].append(
self._message_to_sorted_dict(entity.to_proto())
)
for feature_view in sorted(
self.list_feature_views(project=project),
key=lambda feature_view: feature_view.name,
):
registry_dict["featureViews"].append(
self._message_to_sorted_dict(feature_view.to_proto())
)
for feature_service in sorted(
self.list_feature_services(project=project),
key=lambda feature_service: feature_service.name,
):
registry_dict["featureServices"].append(
self._message_to_sorted_dict(feature_service.to_proto())
)
for on_demand_feature_view in sorted(
self.list_on_demand_feature_views(project=project),
key=lambda on_demand_feature_view: on_demand_feature_view.name,
):
registry_dict["onDemandFeatureViews"].append(
self._message_to_sorted_dict(on_demand_feature_view.to_proto())
)
for request_feature_view in sorted(
self.list_request_feature_views(project=project),
key=lambda request_feature_view: request_feature_view.name,
):
registry_dict["requestFeatureViews"].append(
self._message_to_sorted_dict(request_feature_view.to_proto())
)
for saved_dataset in sorted(
self.list_saved_datasets(project=project), key=lambda item: item.name
):
registry_dict["savedDatasets"].append(
self._message_to_sorted_dict(saved_dataset.to_proto())
)
for infra_object in sorted(self.get_infra(project=project).infra_objects):
registry_dict["infra"].append(
self._message_to_sorted_dict(infra_object.to_proto())
)
return registry_dict
@staticmethod
def _message_to_sorted_dict(message: Message) -> Dict[str, Any]:
return json.loads(MessageToJson(message, sort_keys=True))
def _prepare_registry_for_changes(self):
try:
self._get_registry_proto(allow_cache=True)
except FileNotFoundError:
registry_proto = RegistryProto()
registry_proto.registry_schema_version = REGISTRY_SCHEMA_VERSION
self.cached_registry_proto = registry_proto
self.cached_registry_proto_created = datetime.utcnow()
return self.cached_registry_proto
    def _get_registry_proto(self, allow_cache: bool = False) -> RegistryProto:
        """Return the registry proto, serving from cache when allowed and fresh.

        Args:
            allow_cache: If True, return the cached proto when it exists and
                its TTL has not elapsed; otherwise always reload from the
                backing registry store.

        Returns:
            The (possibly freshly reloaded) registry proto.
        """
        with self._refresh_lock:
            # The cache is expired when it was never populated, or when a
            # positive TTL is configured and has elapsed. A non-positive TTL
            # means the cache never expires on its own.
            expired = (
                self.cached_registry_proto is None
                or self.cached_registry_proto_created is None
            ) or (
                self.cached_registry_proto_ttl.total_seconds()
                > 0
                and (
                    datetime.utcnow()
                    > (
                        self.cached_registry_proto_created
                        + self.cached_registry_proto_ttl
                    )
                )
            )

            if allow_cache and not expired:
                assert isinstance(self.cached_registry_proto, RegistryProto)
                return self.cached_registry_proto

            # Cache miss (or caching disallowed): reload from the store and
            # repopulate the cache while still holding the refresh lock.
            registry_proto = self._registry_store.get_registry_proto()
            self.cached_registry_proto = registry_proto
            self.cached_registry_proto_created = datetime.utcnow()
            return registry_proto
def _check_conflicting_feature_view_names(self, feature_view: BaseFeatureView):
name_to_fv_protos = self._existing_feature_view_names_to_fvs()
if feature_view.name in name_to_fv_protos:
if not isinstance(
name_to_fv_protos.get(feature_view.name), feature_view.proto_class
):
raise ConflictingFeatureViewNames(feature_view.name)
def _existing_feature_view_names_to_fvs(self) -> Dict[str, Message]:
assert self.cached_registry_proto
odfvs = {
fv.spec.name: fv
for fv in self.cached_registry_proto.on_demand_feature_views
}
fvs = {fv.spec.name: fv for fv in self.cached_registry_proto.feature_views}
request_fvs = {
fv.spec.name: fv for fv in self.cached_registry_proto.request_feature_views
}
return {**odfvs, **fvs, **request_fvs}
| true | true |
1c47274e5d7cddc1fe325007a2b3162d454f0df8 | 11,308 | py | Python | edgeconnecttest/models.py | co-develop-drv/FGVC | 60d91f85ee48d757dd070e66984ea57d7e60f668 | [
"MIT"
] | 1,463 | 2020-09-13T22:55:35.000Z | 2022-03-30T20:34:32.000Z | edgeconnect/models.py | scqilin/FGVC | 9820d3c1a33ba402009ecb1d25e897cbcddc74d5 | [
"MIT"
] | 62 | 2020-09-24T02:57:06.000Z | 2022-03-01T01:48:39.000Z | edgeconnect/models.py | scqilin/FGVC | 9820d3c1a33ba402009ecb1d25e897cbcddc74d5 | [
"MIT"
] | 232 | 2020-09-21T02:13:54.000Z | 2022-03-16T22:11:28.000Z | import os
import torch
import torch.nn as nn
import torch.optim as optim
from .networks import InpaintGenerator, EdgeGenerator, Discriminator
from .loss import AdversarialLoss, PerceptualLoss, StyleLoss, TotalVariationalLoss
class BaseModel(nn.Module):
    """Common checkpoint handling (paths, load, save) for the GAN sub-models.

    Subclasses are expected to register ``generator`` and ``discriminator``
    sub-modules; this base class only manages their weight files under
    ``config.PATH``.
    """

    def __init__(self, name, config):
        super(BaseModel, self).__init__()

        self.name = name
        self.config = config
        self.iteration = 0

        # Checkpoint locations derived from the experiment directory.
        self.gen_weights_path = os.path.join(config.PATH, name + '_gen.pth')
        self.dis_weights_path = os.path.join(config.PATH, name + '_dis.pth')

    @staticmethod
    def _load_checkpoint(path):
        """Load a checkpoint dict, mapping tensors onto CPU when no GPU is available."""
        if torch.cuda.is_available():
            return torch.load(path)
        # Keep storages on CPU so GPU-trained weights still deserialize.
        return torch.load(path, map_location=lambda storage, loc: storage)

    def load(self):
        """Restore generator weights (always) and discriminator weights (training only).

        Missing checkpoint files are silently skipped so a fresh run can start
        from random initialization.
        """
        if os.path.exists(self.gen_weights_path):
            print('Loading %s generator...' % self.name)
            data = self._load_checkpoint(self.gen_weights_path)
            self.generator.load_state_dict(data['generator'])
            self.iteration = data['iteration']

        # load discriminator only when training (MODE == 1)
        if self.config.MODE == 1 and os.path.exists(self.dis_weights_path):
            print('Loading %s discriminator...' % self.name)
            data = self._load_checkpoint(self.dis_weights_path)
            self.discriminator.load_state_dict(data['discriminator'])

    def save(self):
        """Persist generator (with iteration counter) and discriminator weights."""
        print('\nsaving %s...\n' % self.name)
        torch.save({
            'iteration': self.iteration,
            'generator': self.generator.state_dict()
        }, self.gen_weights_path)

        torch.save({
            'discriminator': self.discriminator.state_dict()
        }, self.dis_weights_path)
class EdgeModel(BaseModel):
    """Edge-completion GAN: predicts a full edge map from masked grayscale input.

    Bundles an ``EdgeGenerator`` and a 2-channel ``Discriminator`` together
    with their Adam optimizers, the adversarial loss, and an L1 loss used for
    discriminator feature matching.
    """

    def __init__(self, config):
        super(EdgeModel, self).__init__('EdgeModel', config)

        # generator input: [grayscale(1) + edge(1) + mask(1)]
        # discriminator input: (grayscale(1) + edge(1))
        generator = EdgeGenerator(use_spectral_norm=True)
        discriminator = Discriminator(in_channels=2, use_sigmoid=config.GAN_LOSS != 'hinge')
        if len(config.GPU) > 1:
            generator = nn.DataParallel(generator, config.GPU)
            discriminator = nn.DataParallel(discriminator, config.GPU)

        l1_loss = nn.L1Loss()
        adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)

        # Registered via add_module so parameters/buffers are tracked.
        self.add_module('generator', generator)
        self.add_module('discriminator', discriminator)

        self.add_module('l1_loss', l1_loss)
        self.add_module('adversarial_loss', adversarial_loss)

        self.gen_optimizer = optim.Adam(
            params=generator.parameters(),
            lr=float(config.LR),
            betas=(config.BETA1, config.BETA2)
        )

        # Discriminator learning rate is scaled relative to the generator's
        # by the D2G_LR ratio.
        self.dis_optimizer = optim.Adam(
            params=discriminator.parameters(),
            lr=float(config.LR) * float(config.D2G_LR),
            betas=(config.BETA1, config.BETA2)
        )

    def process(self, images, edges, masks):
        """Run one forward pass and compute all losses for a training step.

        Gradients are NOT applied here; the caller passes the returned losses
        to :meth:`backward`.

        Returns:
            (outputs, gen_loss, dis_loss, logs) where ``logs`` is a list of
            (label, value) pairs for monitoring.
        """
        self.iteration += 1


        # zero optimizers
        self.gen_optimizer.zero_grad()
        self.dis_optimizer.zero_grad()


        # process outputs
        outputs = self(images, edges, masks)
        gen_loss = 0
        dis_loss = 0


        # discriminator loss
        # Fake samples are detached so discriminator gradients do not flow
        # into the generator.
        dis_input_real = torch.cat((images, edges), dim=1)
        dis_input_fake = torch.cat((images, outputs.detach()), dim=1)
        dis_real, dis_real_feat = self.discriminator(dis_input_real)        # in: (grayscale(1) + edge(1))
        dis_fake, dis_fake_feat = self.discriminator(dis_input_fake)        # in: (grayscale(1) + edge(1))
        dis_real_loss = self.adversarial_loss(dis_real, True, True)
        dis_fake_loss = self.adversarial_loss(dis_fake, False, True)
        dis_loss += (dis_real_loss + dis_fake_loss) / 2


        # generator adversarial loss
        gen_input_fake = torch.cat((images, outputs), dim=1)
        gen_fake, gen_fake_feat = self.discriminator(gen_input_fake)        # in: (grayscale(1) + edge(1))
        gen_gan_loss = self.adversarial_loss(gen_fake, True, False)
        gen_loss += gen_gan_loss


        # generator feature matching loss
        # L1 distance between discriminator feature maps of real vs fake;
        # real features are detached so only the generator is penalized.
        gen_fm_loss = 0
        for i in range(len(dis_real_feat)):
            gen_fm_loss += self.l1_loss(gen_fake_feat[i], dis_real_feat[i].detach())
        gen_fm_loss = gen_fm_loss * self.config.FM_LOSS_WEIGHT
        gen_loss += gen_fm_loss


        # create logs
        logs = [
            ("l_d1", dis_loss.item()),
            ("l_g1", gen_gan_loss.item()),
            ("l_fm", gen_fm_loss.item()),
        ]

        return outputs, gen_loss, dis_loss, logs

    def forward(self, images, edges, masks):
        """Predict the complete edge map for the masked region.

        Masked image pixels are filled with 1 (white); masked edge pixels
        with 0, so the generator must hallucinate edges there.
        """
        edges_masked = (edges * (1 - masks))
        images_masked = (images * (1 - masks)) + masks
        inputs = torch.cat((images_masked, edges_masked, masks), dim=1)
        outputs = self.generator(inputs)                                    # in: [grayscale(1) + edge(1) + mask(1)]
        return outputs

    def backward(self, gen_loss=None, dis_loss=None):
        """Apply gradients: discriminator first, then generator."""
        if dis_loss is not None:
            dis_loss.backward()
        self.dis_optimizer.step()

        if gen_loss is not None:
            gen_loss.backward()
        self.gen_optimizer.step()
class InpaintingModel(BaseModel):
    """Inpainting GAN for images (RGB) or optical-flow fields.

    Behavior is driven by config flags: ``FLO`` (flow vs image), ``GAN``
    (adversarial training on/off), ``TV``/``ENFORCE`` (extra losses), and
    ``FILL``/``PASSMASK`` (input construction in :meth:`forward`).
    """

    def __init__(self, config):
        super(InpaintingModel, self).__init__('InpaintingModel', config)

        # generator input: [rgb(3) + edge(1)]
        # discriminator input: [rgb(3)]
        generator = InpaintGenerator(config)
        self.config = config

        # Flow completion works on 2-channel flow fields; image completion
        # on 3-channel RGB. Any other FLO value is a config error.
        if config.FLO == 1:
            in_channels = 2
        elif config.FLO == 0:
            in_channels = 3
        else:
            assert(0)

        discriminator = Discriminator(in_channels=in_channels, use_sigmoid=config.GAN_LOSS != 'hinge')

        if len(config.GPU) > 1:
            generator = nn.DataParallel(generator, config.GPU)
            discriminator = nn.DataParallel(discriminator, config.GPU)

        l1_loss = nn.L1Loss()
        tv_loss = TotalVariationalLoss()
        perceptual_loss = PerceptualLoss()
        style_loss = StyleLoss()
        adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)

        self.add_module('generator', generator)
        self.add_module('discriminator', discriminator)

        self.add_module('l1_loss', l1_loss)
        self.add_module('tv_loss', tv_loss)
        self.add_module('perceptual_loss', perceptual_loss)
        self.add_module('style_loss', style_loss)
        self.add_module('adversarial_loss', adversarial_loss)

        self.gen_optimizer = optim.Adam(
            params=generator.parameters(),
            lr=float(config.LR),
            betas=(config.BETA1, config.BETA2)
        )

        self.dis_optimizer = optim.Adam(
            params=discriminator.parameters(),
            lr=float(config.LR) * float(config.D2G_LR),
            betas=(config.BETA1, config.BETA2)
        )

    def process(self, images, images_filled, edges, masks):
        """Run one forward pass and compute all configured losses.

        Gradients are NOT applied here; the caller passes the returned losses
        to :meth:`backward`.

        Returns:
            (outputs, gen_loss, dis_loss, logs) where ``logs`` is a list of
            (label, value) pairs for monitoring. ``dis_loss`` is the int 0
            when adversarial training is disabled (GAN != 1).
        """
        self.iteration += 1

        # zero optimizers
        self.gen_optimizer.zero_grad()
        self.dis_optimizer.zero_grad()

        # process outputs
        outputs = self(images, images_filled, edges, masks)
        gen_loss = 0
        dis_loss = 0
        gen_gan_loss = 0

        if self.config.GAN == 1:
            # discriminator loss (fake detached so D grads don't reach G)
            dis_input_real = images
            dis_input_fake = outputs.detach()
            dis_real, _ = self.discriminator(dis_input_real)                    # in: [rgb(3)]
            dis_fake, _ = self.discriminator(dis_input_fake)                    # in: [rgb(3)]
            dis_real_loss = self.adversarial_loss(dis_real, True, True)
            dis_fake_loss = self.adversarial_loss(dis_fake, False, True)
            dis_loss += (dis_real_loss + dis_fake_loss) / 2

            # generator adversarial loss
            gen_input_fake = outputs
            gen_fake, _ = self.discriminator(gen_input_fake)                    # in: [rgb(3)]
            gen_gan_loss = self.adversarial_loss(gen_fake, True, False) * self.config.INPAINT_ADV_LOSS_WEIGHT
            gen_loss += gen_gan_loss

        # generator l1 loss, normalized by the masked fraction so sparsely
        # masked batches are not under-weighted
        gen_l1_loss = self.l1_loss(outputs, images) * self.config.L1_LOSS_WEIGHT / torch.mean(masks)
        gen_loss += gen_l1_loss

        if self.config.ENFORCE == 1:
            # extra L1 term restricted to the hole region
            gen_l1_masked_loss = self.l1_loss(outputs * masks, images * masks) * 10 * self.config.L1_LOSS_WEIGHT
            gen_loss += gen_l1_masked_loss
        elif self.config.ENFORCE != 0:
            assert(0)

        if self.config.TV == 1:
            # generator total-variation (smoothness) loss
            gen_tv_loss = self.tv_loss(outputs) * self.config.TV_LOSS_WEIGHT
            gen_loss += gen_tv_loss

        if self.config.FLO != 1:
            # perceptual and style losses only apply to RGB images, not flow
            gen_content_loss = self.perceptual_loss(outputs, images)
            gen_content_loss = gen_content_loss * self.config.CONTENT_LOSS_WEIGHT
            gen_loss += gen_content_loss

            gen_style_loss = self.style_loss(outputs * masks, images * masks)
            gen_style_loss = gen_style_loss * self.config.STYLE_LOSS_WEIGHT
            gen_loss += gen_style_loss

            # create logs
            # BUGFIX: when GAN != 1, dis_loss and gen_gan_loss are plain ints,
            # so unconditionally calling .item() on them raised AttributeError.
            # Include the adversarial entries only when GAN training is on
            # (entry order for GAN == 1 is unchanged).
            logs = [("l_l1", gen_l1_loss.item())]
            if self.config.GAN == 1:
                logs = [
                    ("l_d2", dis_loss.item()),
                    ("l_g2", gen_gan_loss.item()),
                ] + logs
            logs.append(("l_per", gen_content_loss.item()))
            logs.append(("l_sty", gen_style_loss.item()))
        else:
            logs = []

        # Common log entries appended for every configuration. NOTE(review):
        # for FLO != 1 with GAN == 1 this duplicates l_l1/l_d2/l_g2, matching
        # the historical output consumed downstream.
        logs.append(("l_l1", gen_l1_loss.item()))
        logs.append(("l_gen", gen_loss.item()))
        if self.config.GAN == 1:
            logs.append(("l_d2", dis_loss.item()))
            logs.append(("l_g2", gen_gan_loss.item()))
        if self.config.TV == 1:
            logs.append(("l_tv", gen_tv_loss.item()))
        if self.config.ENFORCE == 1:
            logs.append(("l_masked_l1", gen_l1_masked_loss.item()))

        return outputs, gen_loss, dis_loss, logs

    def forward(self, images, images_filled, edges, masks):
        """Build the generator input per FILL/PASSMASK config and run it.

        FILL == 1 uses the pre-filled image as input; FILL == 0 zeroes the
        masked region. PASSMASK == 1 additionally feeds the mask channel.
        """
        if self.config.FILL == 1:
            images_masked = images_filled
        elif self.config.FILL == 0:
            images_masked = (images * (1 - masks).float())
        else:
            assert(0)

        if self.config.PASSMASK == 1:
            inputs = torch.cat((images_masked, edges, masks), dim=1)
        elif self.config.PASSMASK == 0:
            inputs = torch.cat((images_masked, edges), dim=1)
        else:
            assert(0)

        outputs = self.generator(inputs)
        return outputs

    def backward(self, gen_loss=None, dis_loss=None):
        """Apply gradients: discriminator (if GAN enabled) first, then generator."""
        if self.config.GAN == 1:
            dis_loss.backward()
            self.dis_optimizer.step()

        gen_loss.backward()
        self.gen_optimizer.step()
| 35.671924 | 116 | 0.595242 | import os
import torch
import torch.nn as nn
import torch.optim as optim
from .networks import InpaintGenerator, EdgeGenerator, Discriminator
from .loss import AdversarialLoss, PerceptualLoss, StyleLoss, TotalVariationalLoss
class BaseModel(nn.Module):
def __init__(self, name, config):
super(BaseModel, self).__init__()
self.name = name
self.config = config
self.iteration = 0
self.gen_weights_path = os.path.join(config.PATH, name + '_gen.pth')
self.dis_weights_path = os.path.join(config.PATH, name + '_dis.pth')
def load(self):
if os.path.exists(self.gen_weights_path):
print('Loading %s generator...' % self.name)
if torch.cuda.is_available():
data = torch.load(self.gen_weights_path)
else:
data = torch.load(self.gen_weights_path, map_location=lambda storage, loc: storage)
self.generator.load_state_dict(data['generator'])
self.iteration = data['iteration']
if self.config.MODE == 1 and os.path.exists(self.dis_weights_path):
print('Loading %s discriminator...' % self.name)
if torch.cuda.is_available():
data = torch.load(self.dis_weights_path)
else:
data = torch.load(self.dis_weights_path, map_location=lambda storage, loc: storage)
self.discriminator.load_state_dict(data['discriminator'])
def save(self):
print('\nsaving %s...\n' % self.name)
torch.save({
'iteration': self.iteration,
'generator': self.generator.state_dict()
}, self.gen_weights_path)
torch.save({
'discriminator': self.discriminator.state_dict()
}, self.dis_weights_path)
class EdgeModel(BaseModel):
def __init__(self, config):
super(EdgeModel, self).__init__('EdgeModel', config)
generator = EdgeGenerator(use_spectral_norm=True)
discriminator = Discriminator(in_channels=2, use_sigmoid=config.GAN_LOSS != 'hinge')
if len(config.GPU) > 1:
generator = nn.DataParallel(generator, config.GPU)
discriminator = nn.DataParallel(discriminator, config.GPU)
l1_loss = nn.L1Loss()
adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)
self.add_module('generator', generator)
self.add_module('discriminator', discriminator)
self.add_module('l1_loss', l1_loss)
self.add_module('adversarial_loss', adversarial_loss)
self.gen_optimizer = optim.Adam(
params=generator.parameters(),
lr=float(config.LR),
betas=(config.BETA1, config.BETA2)
)
self.dis_optimizer = optim.Adam(
params=discriminator.parameters(),
lr=float(config.LR) * float(config.D2G_LR),
betas=(config.BETA1, config.BETA2)
)
def process(self, images, edges, masks):
self.iteration += 1
self.gen_optimizer.zero_grad()
self.dis_optimizer.zero_grad()
outputs = self(images, edges, masks)
gen_loss = 0
dis_loss = 0
dis_input_real = torch.cat((images, edges), dim=1)
dis_input_fake = torch.cat((images, outputs.detach()), dim=1)
dis_real, dis_real_feat = self.discriminator(dis_input_real)
dis_fake, dis_fake_feat = self.discriminator(dis_input_fake)
dis_real_loss = self.adversarial_loss(dis_real, True, True)
dis_fake_loss = self.adversarial_loss(dis_fake, False, True)
dis_loss += (dis_real_loss + dis_fake_loss) / 2
gen_input_fake = torch.cat((images, outputs), dim=1)
gen_fake, gen_fake_feat = self.discriminator(gen_input_fake)
gen_gan_loss = self.adversarial_loss(gen_fake, True, False)
gen_loss += gen_gan_loss
gen_fm_loss = 0
for i in range(len(dis_real_feat)):
gen_fm_loss += self.l1_loss(gen_fake_feat[i], dis_real_feat[i].detach())
gen_fm_loss = gen_fm_loss * self.config.FM_LOSS_WEIGHT
gen_loss += gen_fm_loss
logs = [
("l_d1", dis_loss.item()),
("l_g1", gen_gan_loss.item()),
("l_fm", gen_fm_loss.item()),
]
return outputs, gen_loss, dis_loss, logs
def forward(self, images, edges, masks):
edges_masked = (edges * (1 - masks))
images_masked = (images * (1 - masks)) + masks
inputs = torch.cat((images_masked, edges_masked, masks), dim=1)
outputs = self.generator(inputs)
return outputs
def backward(self, gen_loss=None, dis_loss=None):
if dis_loss is not None:
dis_loss.backward()
self.dis_optimizer.step()
if gen_loss is not None:
gen_loss.backward()
self.gen_optimizer.step()
class InpaintingModel(BaseModel):
def __init__(self, config):
super(InpaintingModel, self).__init__('InpaintingModel', config)
generator = InpaintGenerator(config)
self.config = config
if config.FLO == 1:
in_channels = 2
elif config.FLO == 0:
in_channels = 3
else:
assert(0)
discriminator = Discriminator(in_channels=in_channels, use_sigmoid=config.GAN_LOSS != 'hinge')
if len(config.GPU) > 1:
generator = nn.DataParallel(generator, config.GPU)
discriminator = nn.DataParallel(discriminator , config.GPU)
l1_loss = nn.L1Loss()
tv_loss = TotalVariationalLoss()
perceptual_loss = PerceptualLoss()
style_loss = StyleLoss()
adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)
self.add_module('generator', generator)
self.add_module('discriminator', discriminator)
self.add_module('l1_loss', l1_loss)
self.add_module('tv_loss', tv_loss)
self.add_module('perceptual_loss', perceptual_loss)
self.add_module('style_loss', style_loss)
self.add_module('adversarial_loss', adversarial_loss)
self.gen_optimizer = optim.Adam(
params=generator.parameters(),
lr=float(config.LR),
betas=(config.BETA1, config.BETA2)
)
self.dis_optimizer = optim.Adam(
params=discriminator.parameters(),
lr=float(config.LR) * float(config.D2G_LR),
betas=(config.BETA1, config.BETA2)
)
def process(self, images, images_filled, edges, masks):
self.iteration += 1
self.gen_optimizer.zero_grad()
self.dis_optimizer.zero_grad()
outputs = self(images, images_filled, edges, masks)
gen_loss = 0
dis_loss = 0
gen_gan_loss = 0
if self.config.GAN == 1:
dis_input_real = images
dis_input_fake = outputs.detach()
dis_real, _ = self.discriminator(dis_input_real)
dis_fake, _ = self.discriminator(dis_input_fake)
dis_real_loss = self.adversarial_loss(dis_real, True, True)
dis_fake_loss = self.adversarial_loss(dis_fake, False, True)
dis_loss += (dis_real_loss + dis_fake_loss) / 2
gen_input_fake = outputs
gen_fake, _ = self.discriminator(gen_input_fake)
gen_gan_loss = self.adversarial_loss(gen_fake, True, False) * self.config.INPAINT_ADV_LOSS_WEIGHT
gen_loss += gen_gan_loss
gen_l1_loss = self.l1_loss(outputs, images) * self.config.L1_LOSS_WEIGHT / torch.mean(masks)
gen_loss += gen_l1_loss
if self.config.ENFORCE == 1:
gen_l1_masked_loss = self.l1_loss(outputs * masks, images * masks) * 10 * self.config.L1_LOSS_WEIGHT
gen_loss += gen_l1_masked_loss
elif self.config.ENFORCE != 0:
assert(0)
if self.config.TV == 1:
gen_tv_loss = self.tv_loss(outputs) * self.config.TV_LOSS_WEIGHT
gen_loss += gen_tv_loss
if self.config.FLO != 1:
gen_content_loss = self.perceptual_loss(outputs, images)
gen_content_loss = gen_content_loss * self.config.CONTENT_LOSS_WEIGHT
gen_loss += gen_content_loss
gen_style_loss = self.style_loss(outputs * masks, images * masks)
gen_style_loss = gen_style_loss * self.config.STYLE_LOSS_WEIGHT
gen_loss += gen_style_loss
logs = [
("l_d2", dis_loss.item()),
("l_g2", gen_gan_loss.item()),
("l_l1", gen_l1_loss.item()),
("l_per", gen_content_loss.item()),
("l_sty", gen_style_loss.item()),
]
else:
logs = []
logs.append(("l_l1", gen_l1_loss.item()))
logs.append(("l_gen", gen_loss.item()))
if self.config.GAN == 1:
logs.append(("l_d2", dis_loss.item()))
logs.append(("l_g2", gen_gan_loss.item()))
if self.config.TV == 1:
logs.append(("l_tv", gen_tv_loss.item()))
if self.config.ENFORCE == 1:
logs.append(("l_masked_l1", gen_l1_masked_loss.item()))
return outputs, gen_loss, dis_loss, logs
def forward(self, images, images_filled, edges, masks):
if self.config.FILL == 1:
images_masked = images_filled
elif self.config.FILL == 0:
images_masked = (images * (1 - masks).float())
else:
assert(0)
if self.config.PASSMASK == 1:
inputs = torch.cat((images_masked, edges, masks), dim=1)
elif self.config.PASSMASK == 0:
inputs = torch.cat((images_masked, edges), dim=1)
else:
assert(0)
outputs = self.generator(inputs)
return outputs
def backward(self, gen_loss=None, dis_loss=None):
if self.config.GAN == 1:
dis_loss.backward()
self.dis_optimizer.step()
gen_loss.backward()
self.gen_optimizer.step()
| true | true |
1c472771d828e97cb35a1c49f80939e70dcd8102 | 6,888 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/number_with_validations.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 3 | 2021-05-19T03:12:48.000Z | 2022-01-28T19:15:42.000Z | samples/openapi3/client/petstore/python/petstore_api/model/number_with_validations.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 3 | 2021-05-11T23:55:26.000Z | 2022-02-27T11:17:21.000Z | samples/openapi3/client/petstore/python/petstore_api/model/number_with_validations.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 1 | 2020-10-05T11:13:04.000Z | 2020-10-05T11:13:04.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NumberWithValidations(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
        ('value',): {
            'inclusive_maximum': 20,
            'inclusive_minimum': 10,
        },
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (float,),
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {}

    _composed_schemas = None

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """NumberWithValidations - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (float):  # noqa: E501

        Keyword Args:
            value (float):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # BUGFIX: pop the reserved underscore kwargs *before* checking whether
        # a value was supplied. The missing-value error path below references
        # _path_to_item, which previously raised a NameError (instead of the
        # intended ApiTypeError) because it had not been assigned yet.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
| 37.232432 | 174 | 0.571138 |
import re
import sys
import nulltype
from petstore_api.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NumberWithValidations(ModelSimple):
allowed_values = {
}
validations = {
('value',): {
'inclusive_maximum': 20,
'inclusive_minimum': 10,
},
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
return {
'value': (float,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
| true | true |
1c4727c1218907e3faea73ec7e26cd2e8292e3d2 | 1,709 | py | Python | libs/elfutils/elfutils.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | libs/elfutils/elfutils.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | libs/elfutils/elfutils.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | # -*- coding: utf-8 -*-
import info
from Package.AutoToolsPackageBase import AutoToolsPackageBase
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
    def setTargets(self):
        """Register the downloadable elfutils release tarballs and their digests."""
        for version in ('0.181', '0.185'):
            self.targets[version] = f'https://sourceware.org/elfutils/ftp/{version}/elfutils-{version}.tar.bz2'
            self.targetInstSrc[version] = f'elfutils-{version}'
            self.patchLevel[version] = 0

        self.targetDigests['0.181'] = (['d565541d5817f409dc89ebb1ee593366f69c371a1531308eeb67ff934b14a0fab0c9009fd7c23240efbaa1b4e04edac5c425e47d80e3e66ba03dcaf000afea36'], CraftHash.HashAlgorithm.SHA512)
        self.targetDigests['0.185'] = (['34de0de1355b11740e036e0fc64f2fc063587c8eb121b19216ee5548d3f0f268d8fc3995176c47190466b9d881007cfa11a9d01e9a50e38af6119492bf8bb47f'], CraftHash.HashAlgorithm.SHA512)

        self.description = 'elfutils is a collection of utilities and libraries to read, create and modify ELF binary files, find and handle DWARF debug data, symbols, thread state and stacktraces for processes and core files on GNU/Linux.'
        self.defaultTarget = '0.185'

    def setDependencies(self):
        """Declare the runtime dependencies required to build and run elfutils."""
        for dependency in ('virtual/base', 'libs/zlib', 'libs/liblzma', 'libs/libdwarf'):
            self.runtimeDependencies[dependency] = None
class Package(AutoToolsPackageBase):
    def __init__(self, **args):
        AutoToolsPackageBase.__init__(self)
        # Tune the autotools configure step: no autoreconf, no debuginfod
        # daemon, and link against libintl for gettext symbols.
        configure = self.subinfo.options.configure
        configure.autoreconf = False
        configure.args += " --disable-debuginfod "
        configure.ldflags += " -lintl"
        self.platform = ""
| 51.787879 | 240 | 0.725571 |
import info
from Package.AutoToolsPackageBase import AutoToolsPackageBase
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
def setTargets(self):
for ver in ['0.181', '0.185']:
self.targets[ver] = 'https://sourceware.org/elfutils/ftp/%s/elfutils-%s.tar.bz2' % (ver, ver)
self.targetInstSrc[ver] = "elfutils-" + ver
self.patchLevel[ver] = 0
self.targetDigests['0.181'] = (['d565541d5817f409dc89ebb1ee593366f69c371a1531308eeb67ff934b14a0fab0c9009fd7c23240efbaa1b4e04edac5c425e47d80e3e66ba03dcaf000afea36'], CraftHash.HashAlgorithm.SHA512)
self.targetDigests['0.185'] = (['34de0de1355b11740e036e0fc64f2fc063587c8eb121b19216ee5548d3f0f268d8fc3995176c47190466b9d881007cfa11a9d01e9a50e38af6119492bf8bb47f'], CraftHash.HashAlgorithm.SHA512)
self.description = 'elfutils is a collection of utilities and libraries to read, create and modify ELF binary files, find and handle DWARF debug data, symbols, thread state and stacktraces for processes and core files on GNU/Linux.'
self.defaultTarget = '0.185'
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/zlib"] = None
self.runtimeDependencies["libs/liblzma"] = None
self.runtimeDependencies["libs/libdwarf"] = None
class Package(AutoToolsPackageBase):
def __init__(self, **args):
AutoToolsPackageBase.__init__(self)
self.subinfo.options.configure.autoreconf = False
self.subinfo.options.configure.args += " --disable-debuginfod "
self.subinfo.options.configure.ldflags += " -lintl"
self.platform = ""
| true | true |
1c472884e3e30c55677ee3830a3d39ebae658645 | 14,991 | py | Python | beartype_test/a00_unit/a90_decor/code/pep/test_pepscope.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | beartype_test/a00_unit/a90_decor/code/pep/test_pepscope.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | beartype_test/a00_unit/a90_decor/code/pep/test_pepscope.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype decorator PEP-compliant code wrapper scope utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._decor._code._pep._pepscope` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS ~ adder : type }....................
def test_add_func_scope_type_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_type` function.
    '''

    # Defer heavyweight imports. (An unused import of
    # "_BeartypeDecorBeartypistryException" was removed here; this test never
    # referenced it.)
    from beartype._cave._cavefast import NoneType, RegexCompiledType
    from beartype._decor._code._pep._pepscope import add_func_scope_type
    from beartype._util.utilobject import get_object_type_basename

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function supports...
    classes_nonbuiltin = (
        # Adding a non-builtin type.
        RegexCompiledType,
        # Readding that same type.
        RegexCompiledType,
        # Adding the type of the "None" singleton (despite technically being
        # listed as belonging to the "builtin" module) under a unique name
        # rather than its unqualified basename "NoneType" (which doesn't
        # actually exist, which is inconsistent nonsense, but whatever).
        NoneType,
    )
    for cls in classes_nonbuiltin:
        cls_scope_name = add_func_scope_type(cls=cls, func_scope=func_scope)
        assert cls_scope_name != get_object_type_basename(cls)
        assert func_scope[cls_scope_name] is cls

    # Assert this function does *NOT* add builtin types but instead simply
    # returns the unqualified basenames of those types.
    cls = list
    cls_scope_name = add_func_scope_type(cls=cls, func_scope=func_scope)
    assert cls_scope_name == get_object_type_basename(cls)
    assert cls_scope_name not in func_scope
def test_add_func_scope_type_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_type` function.
    '''

    # Defer heavyweight imports.
    from beartype.roar import BeartypeDecorHintPep3119Exception
    from beartype._decor._code._pep._pepscope import add_func_scope_type
    from beartype_test.a00_unit.data.data_type import NonIsinstanceableClass
    from pytest import raises

    # Local scope these invalid additions are expected to reject.
    scope = {}

    # A tuple is *NOT* a type and must be rejected outright.
    with raises(BeartypeDecorHintPep3119Exception):
        add_func_scope_type(
            cls=(
                'The best lack all conviction, while the worst',
                'Are full of passionate intensity',
            ),
            func_scope=scope,
        )

    # A PEP 560-compliant class whose metaclass defines an
    # __instancecheck__() dunder unconditionally raising exceptions must be
    # rejected as well.
    with raises(BeartypeDecorHintPep3119Exception):
        add_func_scope_type(cls=NonIsinstanceableClass, func_scope=scope)
# ....................{ TESTS ~ adder : tuple }....................
def test_add_func_scope_types_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_types` function.
    '''

    # Defer heavyweight imports. (The previously imported
    # "_BeartypeDecorBeartypistryException" was unused and has been removed.)
    from beartype._cave._cavefast import CallableTypes, ModuleOrStrTypes
    from beartype._cave._cavemap import NoneTypeOr
    from beartype._decor._code._pep._pepscope import add_func_scope_types
    from beartype._util.utilobject import get_object_type_basename
    from beartype_test.a00_unit.data.data_type import Class

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function adds a tuple of one or more standard types.
    #
    # Note that, unlike types, tuples are internally added under different
    # objects than their originals (e.g., to ignore both duplicates and
    # ordering) and *MUST* thus be tested by conversion to sets.
    types = CallableTypes
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert set(types) == set(func_scope[types_scope_name])

    # Assert this function readds the same tuple as well.
    types_scope_name_again = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert types_scope_name == types_scope_name_again

    # Assert this function adds a frozenset of one or more standard types.
    types = frozenset(ModuleOrStrTypes)
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert set(types) == set(func_scope[types_scope_name])

    # Assert this function does *NOT* add tuples of one non-builtin types but
    # instead simply returns the unqualified basenames of those types.
    types = (int,)
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert types_scope_name == get_object_type_basename(types[0])
    assert types_scope_name not in func_scope

    # Assert this function adds tuples of one non-builtin type as merely that
    # type rather than that tuple.
    types = (Class,)
    types_scope_name = add_func_scope_types(types=types, func_scope=func_scope)
    assert func_scope[types_scope_name] is Class

    # Assert this function adds tuples containing duplicate types as tuples
    # containing only the proper subset of non-duplicate types.
    types = (Class,)*3
    types_scope_name = add_func_scope_types(types=types, func_scope=func_scope)
    assert func_scope[types_scope_name] == (Class,)

    # Assert this function registers tuples containing *NO* duplicate types.
    types = NoneTypeOr[CallableTypes]
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope, is_unique=True)
    assert func_scope[types_scope_name] == types

    #FIXME: Disable this until we drop Python 3.6 support. While Python >= 3.7
    #preserves insertion order for sets, Python < 3.7 does *NOT*.
    # # Assert that tuples of the same types but in different orders are
    # # registrable via the same function but reduce to differing objects.
    # hint_a = (int, str,)
    # hint_b = (str, int,)
    # hint_cached_a = _eval_registered_expr(register_typistry_tuple(hint_a))
    # hint_cached_b = _eval_registered_expr(register_typistry_tuple(hint_b))
    # assert hint_cached_a != hint_cached_b
def test_add_func_scope_types_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_types` function.
    '''

    # Defer heavyweight imports
    from beartype.roar import BeartypeDecorHintNonpepException
    from beartype._decor._code._pep._pepscope import add_func_scope_types
    from beartype_test.a00_unit.data.data_type import NonIsinstanceableClass
    from beartype_test.a00_unit.data.hint.pep.proposal.data_pep484 import (
        Pep484GenericTypevaredSingle)
    from pytest import raises

    # Local scope these invalid additions are expected to reject.
    scope = {}

    # Tuple of invalid inputs, each of which this function must reject with
    # the expected exception, including...
    TYPES_INVALID = (
        # Unhashable tuple (i.e., a tuple containing an unhashable item).
        (
            int, str, {
                'Had': "I the heaven’s embroidered cloths,",
                'Enwrought': "with golden and silver light,",
                'The': 'blue and the dim and the dark cloths',
                'Of': 'night and light and the half-light,',
                'I': 'would spread the cloths under your feet:',
                'But': 'I, being poor, have only my dreams;',
                'I have': 'spread my dreams under your feet;',
                'Tread': 'softly because you tread on my dreams.',
            },
        ),

        # Non-tuple.
        '\n'.join((
            'I will arise and go now, and go to Innisfree,',
            'And a small cabin build there, of clay and wattles made;',
            'Nine bean-rows will I have there, a hive for the honey-bee,',
            'And live alone in the bee-loud glade.',
        )),

        # Empty tuple.
        (),

        # Tuple containing one or more PEP-compliant types.
        (int, Pep484GenericTypevaredSingle, str,),

        # Tuple containing one or more PEP 560-compliant classes whose
        # metaclasses define an __instancecheck__() dunder method to
        # unconditionally raise exceptions.
        (bool, NonIsinstanceableClass, float,),
    )

    # Assert this function rejects each invalid input above in turn.
    for types_invalid in TYPES_INVALID:
        with raises(BeartypeDecorHintNonpepException):
            add_func_scope_types(types=types_invalid, func_scope=scope)
# ....................{ TESTS ~ expresser : type }....................
def test_express_func_scope_type_forwardref() -> None:
    '''
    Test the
    :func:`beartype._decor._code._pep._pepscope.express_func_scope_type_forwardref`
    function.
    '''

    # Defer heavyweight imports.
    from beartype.roar import BeartypeDecorHintForwardRefException
    from beartype._decor._cache.cachetype import bear_typistry
    from beartype._decor._code.codemagic import ARG_NAME_TYPISTRY
    from beartype._decor._code._pep._pepscope import (
        express_func_scope_type_forwardref)
    from beartype._util.hint.pep.proposal.pep484.utilpep484ref import (
        HINT_PEP484_FORWARDREF_TYPE)
    from pytest import raises

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Set of the unqualified classnames referred to by all relative forward
    # references relative to this scope if any *OR* "None" otherwise (i.e., if
    # no such references have been expressed relative to this scope yet).
    #
    # NOTE: this variable threads state between the two loops below -- the
    # second loop intentionally starts from the value left by the first.
    forwardrefs_class_basename = None

    # Fully-qualified classname of a non-existing class. The embedded "."
    # delimiters are what mark this classname as fully-qualified.
    CLASSNAME_QUALIFIED = 'Thy.giant.brood.of.pines.around.thee.clinging'

    # Unqualified classname of a non-existing class (no "." delimiters).
    CLASSNAME_UNQUALIFIED = 'Children_of_elder_time_in_whose_devotion'

    # Tuple of all PEP-compliant forward references to this fully-qualified
    # class, including...
    FORWARDREFS_QUALIFIED = (
        # PEP 484-compliant forward reference to this class.
        HINT_PEP484_FORWARDREF_TYPE(CLASSNAME_QUALIFIED),

        # PEP 585-compliant forward reference to this class.
        CLASSNAME_QUALIFIED,
    )

    # Tuple of all PEP-compliant forward references to this unqualified class,
    # including...
    FORWARDREFS_UNQUALIFIED = (
        # PEP 484-compliant forward reference to this class.
        HINT_PEP484_FORWARDREF_TYPE(CLASSNAME_UNQUALIFIED),

        # PEP 585-compliant forward reference to this class.
        CLASSNAME_UNQUALIFIED,
    )

    # For each PEP-compliant forward reference to a fully-qualified class...
    for forwardref_qualified in FORWARDREFS_QUALIFIED:
        # Express a fully-qualified forward reference to a non-existing class.
        forwardref_expr, forwardrefs_class_basename = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_qualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))

        # Assert this expression references this class.
        assert CLASSNAME_QUALIFIED in forwardref_expr

        # Assert this set remains empty.
        assert forwardrefs_class_basename is None

        # Assert the beartypistry singleton has been added to this scope as a
        # private "__beartypistry" attribute.
        assert func_scope[ARG_NAME_TYPISTRY] is bear_typistry

        # Assert this function rexpresses the same forward reference.
        forwardref_expr_again, forwardrefs_class_basename_again = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_qualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))
        assert forwardref_expr_again == forwardref_expr
        assert forwardrefs_class_basename_again is forwardrefs_class_basename

    # For each PEP-compliant forward reference to an unqualified class...
    for forwardref_unqualified in FORWARDREFS_UNQUALIFIED:
        # Express an unqualified forward reference to a non-existing class.
        forwardref_expr, forwardrefs_class_basename = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_unqualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))

        # Assert this expression references this class.
        assert CLASSNAME_UNQUALIFIED in forwardref_expr

        # Assert this set now contains only this classname.
        assert forwardrefs_class_basename == {CLASSNAME_UNQUALIFIED,}

        # Assert this function rexpresses the same forward reference.
        forwardref_expr_again, forwardrefs_class_basename_again = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_unqualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))
        assert forwardref_expr_again == forwardref_expr
        assert forwardrefs_class_basename_again == {CLASSNAME_UNQUALIFIED,}

    # Assert this function raises the expected exception for arbitrary objects
    # that are *NOT* forward references (here, a bytestring).
    with raises(BeartypeDecorHintForwardRefException):
        express_func_scope_type_forwardref(
            forwardref=b'The chainless winds still come and ever came',
            forwardrefs_class_basename=forwardrefs_class_basename,
            func_scope=func_scope,
        )
| 42.954155 | 83 | 0.686745 |
def test_add_func_scope_type_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_type` function.
    '''

    # Defer heavyweight imports. (The previously imported
    # "_BeartypeDecorBeartypistryException" was unused and has been removed.)
    from beartype._cave._cavefast import NoneType, RegexCompiledType
    from beartype._decor._code._pep._pepscope import add_func_scope_type
    from beartype._util.utilobject import get_object_type_basename

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function supports adding a non-builtin type, readding that
    # same type, and adding the type of the "None" singleton under a unique
    # name rather than its unqualified basename "NoneType".
    classes_nonbuiltin = (
        RegexCompiledType,
        RegexCompiledType,
        NoneType,
    )
    for cls in classes_nonbuiltin:
        cls_scope_name = add_func_scope_type(cls=cls, func_scope=func_scope)
        assert cls_scope_name != get_object_type_basename(cls)
        assert func_scope[cls_scope_name] is cls

    # Assert this function does *NOT* add builtin types but instead simply
    # returns the unqualified basenames of those types.
    cls = list
    cls_scope_name = add_func_scope_type(cls=cls, func_scope=func_scope)
    assert cls_scope_name == get_object_type_basename(cls)
    assert cls_scope_name not in func_scope
def test_add_func_scope_type_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_type` function.
    '''

    # Defer heavyweight imports.
    from beartype.roar import BeartypeDecorHintPep3119Exception
    from beartype._decor._code._pep._pepscope import add_func_scope_type
    from beartype_test.a00_unit.data.data_type import NonIsinstanceableClass
    from pytest import raises

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function raises the expected exception for non-types.
    with raises(BeartypeDecorHintPep3119Exception):
        add_func_scope_type(
            cls=(
                'The best lack all conviction, while the worst',
                'Are full of passionate intensity',
            ),
            func_scope=func_scope,
        )

    # Assert this function raises the expected exception for PEP 560-compliant
    # classes whose metaclasses define an __instancecheck__() dunder method to
    # unconditionally raise exceptions.
    with raises(BeartypeDecorHintPep3119Exception):
        add_func_scope_type(cls=NonIsinstanceableClass, func_scope=func_scope)
# ....................{ TESTS ~ adder : tuple }....................
def test_add_func_scope_types_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_types` function.
    '''

    # Defer heavyweight imports. (The previously imported
    # "_BeartypeDecorBeartypistryException" was unused and has been removed.)
    from beartype._cave._cavefast import CallableTypes, ModuleOrStrTypes
    from beartype._cave._cavemap import NoneTypeOr
    from beartype._decor._code._pep._pepscope import add_func_scope_types
    from beartype._util.utilobject import get_object_type_basename
    from beartype_test.a00_unit.data.data_type import Class

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function adds a tuple of one or more standard types.
    #
    # Note that, unlike types, tuples are internally added under different
    # objects than their originals (e.g., to ignore both duplicates and
    # ordering) and *MUST* thus be tested by conversion to sets.
    types = CallableTypes
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert set(types) == set(func_scope[types_scope_name])

    # Assert this function readds the same tuple as well.
    types_scope_name_again = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert types_scope_name == types_scope_name_again

    # Assert this function adds a frozenset of one or more standard types.
    types = frozenset(ModuleOrStrTypes)
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert set(types) == set(func_scope[types_scope_name])

    # Assert this function does *NOT* add tuples of one non-builtin types but
    # instead simply returns the unqualified basenames of those types.
    types = (int,)
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope)
    assert types_scope_name == get_object_type_basename(types[0])
    assert types_scope_name not in func_scope

    # Assert this function adds tuples of one non-builtin type as merely that
    # type rather than that tuple.
    types = (Class,)
    types_scope_name = add_func_scope_types(types=types, func_scope=func_scope)
    assert func_scope[types_scope_name] is Class

    # Assert this function adds tuples containing duplicate types as tuples
    # containing only the proper subset of non-duplicate types.
    types = (Class,)*3
    types_scope_name = add_func_scope_types(types=types, func_scope=func_scope)
    assert func_scope[types_scope_name] == (Class,)

    # Assert this function registers tuples containing *NO* duplicate types.
    types = NoneTypeOr[CallableTypes]
    types_scope_name = add_func_scope_types(
        types=types, func_scope=func_scope, is_unique=True)
    assert func_scope[types_scope_name] == types

    #FIXME: Disable this until we drop Python 3.6 support. While Python >= 3.7
    #preserves insertion order for sets, Python < 3.7 does *NOT*.
    # # Assert that tuples of the same types but in different orders are
    # # registrable via the same function but reduce to differing objects.
    # hint_a = (int, str,)
    # hint_b = (str, int,)
    # hint_cached_a = _eval_registered_expr(register_typistry_tuple(hint_a))
    # hint_cached_b = _eval_registered_expr(register_typistry_tuple(hint_b))
    # assert hint_cached_a != hint_cached_b
def test_add_func_scope_types_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._decor._code._pep._pepscope.add_func_scope_types` function.
    '''

    # Defer heavyweight imports
    from beartype.roar import BeartypeDecorHintNonpepException
    from beartype._decor._code._pep._pepscope import add_func_scope_types
    from beartype_test.a00_unit.data.data_type import NonIsinstanceableClass
    from beartype_test.a00_unit.data.hint.pep.proposal.data_pep484 import (
        Pep484GenericTypevaredSingle)
    from pytest import raises

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Assert this function raises the expected exception for unhashable tuples.
    with raises(BeartypeDecorHintNonpepException):
        add_func_scope_types(
            types=(
                int, str, {
                    'Had': "I the heaven’s embroidered cloths,",
                    'Enwrought': "with golden and silver light,",
                    'The': 'blue and the dim and the dark cloths',
                    'Of': 'night and light and the half-light,',
                    'I': 'would spread the cloths under your feet:',
                    'But': 'I, being poor, have only my dreams;',
                    'I have': 'spread my dreams under your feet;',
                    'Tread': 'softly because you tread on my dreams.',
                },
            ),
            func_scope=func_scope,
        )

    # Assert this function raises the expected exception for non-tuples.
    with raises(BeartypeDecorHintNonpepException):
        add_func_scope_types(
            types='\n'.join((
                'I will arise and go now, and go to Innisfree,',
                'And a small cabin build there, of clay and wattles made;',
                'Nine bean-rows will I have there, a hive for the honey-bee,',
                'And live alone in the bee-loud glade.',
            )),
            func_scope=func_scope,
        )

    # Assert this function raises the expected exception for empty tuples.
    with raises(BeartypeDecorHintNonpepException):
        add_func_scope_types(types=(), func_scope=func_scope)

    # Assert this function raises the expected exception for tuples containing
    # one or more PEP-compliant types.
    with raises(BeartypeDecorHintNonpepException):
        add_func_scope_types(
            types=(int, Pep484GenericTypevaredSingle, str,),
            func_scope=func_scope,
        )

    # Assert this function raises the expected exception for tuples containing
    # one or more PEP 560-compliant classes whose metaclasses define an
    # __instancecheck__() dunder method to unconditionally raise exceptions.
    with raises(BeartypeDecorHintNonpepException):
        add_func_scope_types(
            types=(bool, NonIsinstanceableClass, float,),
            func_scope=func_scope,
        )
# ....................{ TESTS ~ expresser : type }....................
def test_express_func_scope_type_forwardref() -> None:
    '''
    Test the
    :func:`beartype._decor._code._pep._pepscope.express_func_scope_type_forwardref`
    function.
    '''

    # Defer heavyweight imports.
    from beartype.roar import BeartypeDecorHintForwardRefException
    from beartype._decor._cache.cachetype import bear_typistry
    from beartype._decor._code.codemagic import ARG_NAME_TYPISTRY
    from beartype._decor._code._pep._pepscope import (
        express_func_scope_type_forwardref)
    from beartype._util.hint.pep.proposal.pep484.utilpep484ref import (
        HINT_PEP484_FORWARDREF_TYPE)
    from pytest import raises

    # Arbitrary scope to be added to below.
    func_scope = {}

    # Set of the unqualified classnames referred to by all relative forward
    # references relative to this scope if any *OR* "None" otherwise (i.e., if
    # no such references have been expressed relative to this scope yet).
    # This variable threads state between the two loops below.
    forwardrefs_class_basename = None

    # Fully-qualified classname of a non-existing class ("." delimited).
    CLASSNAME_QUALIFIED = 'Thy.giant.brood.of.pines.around.thee.clinging'

    # Unqualified classname of a non-existing class (no "." delimiters).
    CLASSNAME_UNQUALIFIED = 'Children_of_elder_time_in_whose_devotion'

    # Tuple of all PEP-compliant forward references to this fully-qualified
    # class, including...
    FORWARDREFS_QUALIFIED = (
        # PEP 484-compliant forward reference to this class.
        HINT_PEP484_FORWARDREF_TYPE(CLASSNAME_QUALIFIED),

        # PEP 585-compliant forward reference to this class.
        CLASSNAME_QUALIFIED,
    )

    # Tuple of all PEP-compliant forward references to this unqualified class,
    # including...
    FORWARDREFS_UNQUALIFIED = (
        # PEP 484-compliant forward reference to this class.
        HINT_PEP484_FORWARDREF_TYPE(CLASSNAME_UNQUALIFIED),

        # PEP 585-compliant forward reference to this class.
        CLASSNAME_UNQUALIFIED,
    )

    # For each PEP-compliant forward reference to a fully-qualified class...
    for forwardref_qualified in FORWARDREFS_QUALIFIED:
        # Express a fully-qualified forward reference to a non-existing class.
        forwardref_expr, forwardrefs_class_basename = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_qualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))

        # Assert this expression references this class.
        assert CLASSNAME_QUALIFIED in forwardref_expr

        # Assert this set remains empty.
        assert forwardrefs_class_basename is None

        # Assert the beartypistry singleton has been added to this scope as a
        # private "__beartypistry" attribute.
        assert func_scope[ARG_NAME_TYPISTRY] is bear_typistry

        # Assert this function rexpresses the same forward reference.
        forwardref_expr_again, forwardrefs_class_basename_again = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_qualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))
        assert forwardref_expr_again == forwardref_expr
        assert forwardrefs_class_basename_again is forwardrefs_class_basename

    # For each PEP-compliant forward reference to an unqualified class...
    for forwardref_unqualified in FORWARDREFS_UNQUALIFIED:
        # Express an unqualified forward reference to a non-existing class.
        forwardref_expr, forwardrefs_class_basename = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_unqualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))

        # Assert this expression references this class.
        assert CLASSNAME_UNQUALIFIED in forwardref_expr

        # Assert this set now contains only this classname.
        assert forwardrefs_class_basename == {CLASSNAME_UNQUALIFIED,}

        # Assert this function rexpresses the same forward reference.
        forwardref_expr_again, forwardrefs_class_basename_again = (
            express_func_scope_type_forwardref(
                forwardref=forwardref_unqualified,
                forwardrefs_class_basename=forwardrefs_class_basename,
                func_scope=func_scope,
            ))
        assert forwardref_expr_again == forwardref_expr
        assert forwardrefs_class_basename_again == {CLASSNAME_UNQUALIFIED,}

    # Assert this function raises the expected exception for arbitrary objects
    # that are *NOT* forward references (here, a bytestring).
    with raises(BeartypeDecorHintForwardRefException):
        express_func_scope_type_forwardref(
            forwardref=b'The chainless winds still come and ever came',
            forwardrefs_class_basename=forwardrefs_class_basename,
            func_scope=func_scope,
        )
| true | true |
1c47294261aa77c72a9bf2fb138f12409b92d6be | 440,048 | py | Python | ns-allinone-3.22/ns-3.22/src/dsdv/bindings/modulegen__gcc_LP64.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | null | null | null | ns-allinone-3.22/ns-3.22/src/dsdv/bindings/modulegen__gcc_LP64.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | null | null | null | ns-allinone-3.22/ns-3.22/src/dsdv/bindings/modulegen__gcc_LP64.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | 2 | 2018-06-06T14:10:23.000Z | 2020-04-07T17:20:55.000Z | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Lenient pybindgen error handler: emit a warning for each failed
    wrapper instead of aborting binding generation."""
    def handle_error(self, wrapper, exception, traceback_):
        # Surface the failure as a Python warning. Returning True marks the
        # error as handled -- presumably letting pybindgen skip this wrapper
        # and continue; confirm against pybindgen.settings docs.
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
# Install the lenient handler globally for this generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen module for the "ns.dsdv"
    bindings, rooted in the "::ns3" C++ namespace."""
    return Module('ns.dsdv', cpp_namespace='::ns3')
def register_types(module):
    """Register every C++ type wrapped by the ns.dsdv Python bindings.

    Auto-generated by pybindgen's modulegen.  Declares each wrapped class,
    enum and container on *module*, records the implicit Address
    conversions, then recurses into the nested C++ namespaces
    (FatalImpl, Hash, dsdv).

    NOTE(review): the order of add_class calls is significant — parent
    classes are looked up through root_module[...] and must already be
    registered.  Do not reorder.

    :param module: the root pybindgen Module created by module_init().
    """
    root_module = module.get_root()
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
    module.add_class('Inet6SocketAddress', import_from_module='ns.network')
    ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
    root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
    ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
    module.add_class('InetSocketAddress', import_from_module='ns.network')
    ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
    root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
    ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
    ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
    ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
    ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
    ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
    ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
    ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
    ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
    module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## node-container.h (module 'network'): ns3::NodeContainer [class]
    module.add_class('NodeContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
    module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## timer.h (module 'core'): ns3::Timer [class]
    module.add_class('Timer', import_from_module='ns.core')
    ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
    module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    ## timer.h (module 'core'): ns3::Timer::State [enumeration]
    module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    ## timer-impl.h (module 'core'): ns3::TimerImpl [class]
    module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## dsdv-helper.h (module 'dsdv'): ns3::DsdvHelper [class]
    module.add_class('DsdvHelper', parent=root_module['ns3::Ipv4RoutingHelper'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
    module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
    module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
    module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## socket.h (module 'network'): ns3::Socket [class]
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::SocketAddressTag [class]
    module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTosTag [class]
    module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag [class]
    module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class]
    module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## ipv4.h (module 'internet'): ns3::Ipv4 [class]
    module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface [class]
    module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class]
    module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration]
    module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
    module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
    module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
    module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting [class]
    module.add_class('Ipv4ListRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol'])
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace dsdv
    nested_module = module.add_cpp_namespace('dsdv')
    register_types_ns3_dsdv(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register Python binding types for the ns3::FatalImpl namespace.

    This namespace declares no bindable types, so nothing is registered.
    The root-module lookup is retained as a bare call (in case the
    accessor has side effects — presumably it does not, but that cannot
    be confirmed from here) instead of binding it to an unused local.
    """
    module.get_root()
def register_types_ns3_Hash(module):
    """Register binding types for the ns3::Hash namespace.

    Registers the Hash::Implementation class, type aliases for the 32/64-bit
    hash function-pointer typedefs, and the nested Function namespace.
    Statement order matters: add_class looks up an already-registered parent
    through the root module.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register binding types for the ns3::Hash::Function namespace.

    Each concrete hash implementation (Fnv1a, Hash32, Hash64, Murmur3)
    derives from the Hash::Implementation class registered earlier and is
    looked up through the root module.
    """
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_dsdv(module):
    """Register binding types for the ns3::dsdv namespace.

    Registers the RouteFlags enum, the DSDV header/queue/routing classes,
    and a map container used by the routing table.  RoutingProtocol
    derives from Ipv4RoutingProtocol, looked up through the root module.
    """
    root_module = module.get_root()
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RouteFlags [enumeration]
    module.add_enum('RouteFlags', ['VALID', 'INVALID'])
    ## dsdv-packet.h (module 'dsdv'): ns3::dsdv::DsdvHeader [class]
    module.add_class('DsdvHeader', parent=root_module['ns3::Header'])
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::PacketQueue [class]
    module.add_class('PacketQueue')
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::QueueEntry [class]
    module.add_class('QueueEntry')
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::dsdv::RoutingProtocol [class]
    module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTable [class]
    module.add_class('RoutingTable')
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTableEntry [class]
    module.add_class('RoutingTableEntry')
    module.add_container('std::map< ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry >', ('ns3::Ipv4Address', 'ns3::dsdv::RoutingTableEntry'), container_type=u'map')
def register_methods(root_module):
    """Register method bindings for every class wrapped by this module.

    Delegates to one register_*_methods helper per class; each helper
    receives the root module and the class wrapper created during type
    registration, so this must run after the type-registration pass.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3DsdvHelper_methods(root_module, root_module['ns3::DsdvHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
    register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    register_Ns3DsdvDsdvHeader_methods(root_module, root_module['ns3::dsdv::DsdvHeader'])
    register_Ns3DsdvPacketQueue_methods(root_module, root_module['ns3::dsdv::PacketQueue'])
    register_Ns3DsdvQueueEntry_methods(root_module, root_module['ns3::dsdv::QueueEntry'])
    register_Ns3DsdvRoutingProtocol_methods(root_module, root_module['ns3::dsdv::RoutingProtocol'])
    register_Ns3DsdvRoutingTable_methods(root_module, root_module['ns3::dsdv::RoutingTable'])
    register_Ns3DsdvRoutingTableEntry_methods(root_module, root_module['ns3::dsdv::RoutingTableEntry'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Attach bindings for ns3::Address members to *cls*.

    Registers comparison/stream operators, constructors, and the
    buffer-copy / (de)serialization member functions of ns3::Address.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Attach bindings for ns3::AttributeConstructionList members to *cls*."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Attach bindings for ns3::AttributeConstructionList::Item to *cls*.

    Registers the constructors plus the checker/name/value instance
    attributes of the list item struct.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Attach bindings for ns3::Buffer members to *cls*.

    Registers constructors, grow/shrink operations, data copy,
    (de)serialization, and iterator accessors of the packet buffer class.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'bool',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'bool',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy',
                   'ns3::Buffer',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Attach bindings for ns3::Buffer::Iterator members to *cls*.

    Registers constructors, cursor movement, checksum helpers, and the
    typed Read*/Write* accessors (host, network, and LSB byte orders).
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register bindings for ns3::ByteTagIterator (packet.h, module 'network')."""
    # ByteTagIterator(ByteTagIterator const &) -- copy constructor
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ByteTagIterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::ByteTagIterator::Item (packet.h, module 'network')."""
    # Item(Item const &) -- copy constructor
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # byte-range accessors: uint32_t GetEnd() const / uint32_t GetStart() const
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    # void GetTag(ns3::Tag & tag) const -- read the tag into the caller's object
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList (byte-tag-list.h, module 'network')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    # TagBuffer Add(TypeId tid, uint32_t bufferSize, int32_t start, int32_t end)
    cls.add_method('Add', 'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    # void Add(ByteTagList const & o) -- merge another list's tags
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    # offset adjustment when bytes are appended/prepended to the packet
    cls.add_method('AddAtEnd', 'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    cls.add_method('AddAtStart', 'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    # Iterator Begin(int32_t offsetStart, int32_t offsetEnd) const
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    # void RemoveAll()
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList::Iterator (byte-tag-list.h, module 'network')."""
    # Iterator(Iterator const &) -- copy constructor
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    # uint32_t GetOffsetStart() const
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # Iterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList::Iterator::Item (byte-tag-list.h, module 'network')."""
    # copy constructor and construction from a TagBuffer
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # public data members exposed as mutable instance attributes
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackBase (callback.h, module 'core')."""
    # copy and default constructors
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<CallbackImplBase> GetImpl() const
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # protected constructor taking the callback implementation
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    # static std::string Demangle(std::string const & mangled) -- protected helper
    cls.add_method('Demangle', 'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register bindings for ns3::EventId (event-id.h, module 'core')."""
    # equality operators
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # copy and default constructors
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    # EventId(Ptr<EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid)
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    # void Cancel()
    cls.add_method('Cancel', 'void', [])
    # const accessors for the event's identity fields
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    # state queries: bool IsExpired() const / bool IsRunning() const
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    # EventImpl * PeekEventImpl() const
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register bindings for ns3::Hasher (hash.h, module 'core')."""
    # copy and default constructors
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    cls.add_constructor([])
    # Hasher(Ptr<Hash::Implementation> hp) -- wrap a specific hash implementation
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    # 32-bit hash overloads: raw buffer and std::string
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    # 64-bit hash overloads: raw buffer and std::string
    cls.add_method('GetHash64', 'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    # Hasher & clear() -- reset internal state, returns self for chaining
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
def register_Ns3Inet6SocketAddress_methods(root_module, cls):
    """Register bindings for ns3::Inet6SocketAddress (inet6-socket-address.h, module 'network')."""
    # copy constructor
    cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')])
    # constructors over (Ipv6Address, port), Ipv6Address only, and port only
    cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')])
    cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')])
    cls.add_constructor([param('uint16_t', 'port')])
    # string-address constructors, with and without a port
    cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')])
    cls.add_constructor([param('char const *', 'ipv6')])
    # static Inet6SocketAddress ConvertFrom(Address const & addr)
    cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress',
                   [param('ns3::Address const &', 'addr')],
                   is_static=True)
    # const accessors
    cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('GetPort', 'uint16_t', [], is_const=True)
    # static bool IsMatchingType(Address const & addr)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'addr')],
                   is_static=True)
    # mutators
    cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')])
    cls.add_method('SetPort', 'void', [param('uint16_t', 'port')])
    return
def register_Ns3InetSocketAddress_methods(root_module, cls):
    """Register bindings for ns3::InetSocketAddress (inet-socket-address.h, module 'network')."""
    # copy constructor
    cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')])
    # constructors over (Ipv4Address, port), Ipv4Address only, and port only
    cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')])
    cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')])
    cls.add_constructor([param('uint16_t', 'port')])
    # string-address constructors, with and without a port
    cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')])
    cls.add_constructor([param('char const *', 'ipv4')])
    # static InetSocketAddress ConvertFrom(Address const & address)
    cls.add_method('ConvertFrom', 'ns3::InetSocketAddress',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    # const accessors
    cls.add_method('GetIpv4', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetPort', 'uint16_t', [], is_const=True)
    # static bool IsMatchingType(Address const & address)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    # mutators
    cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')])
    cls.add_method('SetPort', 'void', [param('uint16_t', 'port')])
    return
def register_Ns3IntToType__0_methods(root_module, cls):
    """Register bindings for ns3::IntToType<0> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
    return
def register_Ns3IntToType__1_methods(root_module, cls):
    """Register bindings for ns3::IntToType<1> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
    return
def register_Ns3IntToType__2_methods(root_module, cls):
    """Register bindings for ns3::IntToType<2> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
    return
def register_Ns3IntToType__3_methods(root_module, cls):
    """Register bindings for ns3::IntToType<3> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
    return
def register_Ns3IntToType__4_methods(root_module, cls):
    """Register bindings for ns3::IntToType<4> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
    return
def register_Ns3IntToType__5_methods(root_module, cls):
    """Register bindings for ns3::IntToType<5> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
    return
def register_Ns3IntToType__6_methods(root_module, cls):
    """Register bindings for ns3::IntToType<6> (int-to-type.h, module 'core')."""
    # default and copy constructors are the only members
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # comparison and stream-insertion operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # copy and default constructors
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    cls.add_constructor([])
    # construct from a host-order 32-bit value or a dotted-decimal string
    cls.add_constructor([param('uint32_t', 'address')])
    cls.add_constructor([param('char const *', 'address')])
    # Ipv4Address CombineMask(Ipv4Mask const & mask) const
    cls.add_method('CombineMask', 'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    # static Ipv4Address ConvertFrom(Address const & address)
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    # static Ipv4Address Deserialize(uint8_t const * buf)
    cls.add_method('Deserialize', 'ns3::Ipv4Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    # uint32_t Get() const
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    # static well-known addresses
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    # Ipv4Address GetSubnetDirectedBroadcast(Ipv4Mask const & mask) const
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    # classification predicates
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ipv4Address const &', 'other')],
                   is_const=True)
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    # static bool IsMatchingType(Address const & address)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSubnetDirectedBroadcast', 'bool',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    # void Print(std::ostream & os) const
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    # void Serialize(uint8_t * buf) const
    cls.add_method('Serialize', 'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    # Set overloads mirroring the non-default constructors
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
    """Register bindings for ns3::Ipv4InterfaceAddress (ipv4-interface-address.h, module 'internet')."""
    # comparison and stream-insertion operators
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # default constructor, (local, mask) constructor, and copy constructor
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')])
    cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
    # const accessors
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e',
                   [], is_const=True)
    cls.add_method('IsSecondary', 'bool', [], is_const=True)
    # mutators
    cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')])
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')])
    cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')])
    cls.add_method('SetPrimary', 'void', [])
    cls.add_method('SetScope', 'void',
                   [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
    cls.add_method('SetSecondary', 'void', [])
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    # comparison and stream-insertion operators
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # copy and default constructors
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    cls.add_constructor([])
    # construct from a 32-bit mask value or a dotted/prefix string
    cls.add_constructor([param('uint32_t', 'mask')])
    cls.add_constructor([param('char const *', 'mask')])
    # uint32_t Get() const / uint32_t GetInverse() const
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    # static well-known masks
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    # uint16_t GetPrefixLength() const
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    # bool IsEqual(Ipv4Mask other) const
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ipv4Mask', 'other')],
                   is_const=True)
    # bool IsMatch(Ipv4Address a, Ipv4Address b) const -- same subnet test
    cls.add_method('IsMatch', 'bool',
                   [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
                   is_const=True)
    # void Print(std::ostream & os) const
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    # void Set(uint32_t mask)
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return
def register_Ns3Ipv4RoutingHelper_methods(root_module, cls):
    """Register bindings for ns3::Ipv4RoutingHelper (ipv4-routing-helper.h, module 'internet')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')])
    # pure-virtual factory interface: Copy() and Create(node)
    cls.add_method('Copy', 'ns3::Ipv4RoutingHelper *', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static neighbor-cache printing helpers (all nodes / single node, once / periodic)
    cls.add_method('PrintNeighborCacheAllAt', 'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintNeighborCacheAllEvery', 'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintNeighborCacheAt', 'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintNeighborCacheEvery', 'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    # static routing-table printing helpers (all nodes / single node, once / periodic)
    cls.add_method('PrintRoutingTableAllAt', 'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintRoutingTableAllEvery', 'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintRoutingTableAt', 'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    cls.add_method('PrintRoutingTableEvery', 'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_static=True)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (network module).

    Auto-generated PyBindGen registration: wraps constructors, comparison
    operators and every public member function of ns3::Ipv6Address so the
    class is usable from Python.  The ``##`` comments give the original
    C++ declaration each registration corresponds to.
    """
    # Comparison / stream operators: <, !=, ==, and operator<< for printing.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv6Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv6Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Address const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
    cls.add_method('IsIpv4MappedAddress',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac16Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac64Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv4Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t *', 'address')])
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (network module).

    Auto-generated PyBindGen registration: wraps the constructors,
    the !=/== comparison and stream-output operators, and the public
    member functions of ns3::Ipv6Prefix.
    """
    # Comparison / stream operators for the prefix type.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Prefix const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NodeContainer (network module).

    Auto-generated PyBindGen registration: wraps the many convenience
    constructors (including the 2..5-container concatenating forms),
    Add/Create, the Begin/End iterators, and the Get/GetN accessors.
    """
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::Node >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::NodeContainer',
                   [],
                   is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (core module).

    Auto-generated PyBindGen registration: wraps the attribute
    get/set interface, the trace connect/disconnect interface, and the
    protected construction hooks of the ns-3 object base class.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (core module).

    Auto-generated PyBindGen registration: exposes the default and
    copy constructors plus the static Delete(ns3::Object *) helper.
    """
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    for ctor_args in ([], [param('ns3::ObjectDeleter const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    delete_param = param('ns3::Object *', 'object')
    cls.add_method('Delete', 'void', [delete_param], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (core module).

    Auto-generated PyBindGen registration: wraps the constructors,
    operator<<, Create/GetTypeId, the attribute Set helper, and the
    three SetTypeId overloads.
    """
    # operator<< so the factory can be printed.
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata (network module).

    Auto-generated PyBindGen registration: wraps header/trailer
    add-remove operations, fragment/serialization support, the item
    iterator entry point, and the static Enable/EnableChecking toggles.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::Item.

    Auto-generated PyBindGen registration: exposes the constructors and
    the public data members of the metadata item struct as instance
    attributes.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::ItemIterator.

    Auto-generated PyBindGen registration: wraps the constructors and
    the HasNext/Next iteration pair.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketMetadata::Item',
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator.

    Auto-generated PyBindGen registration: wraps the copy constructor
    and the HasNext/Next iteration pair over packet tags.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketTagIterator::Item',
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator::Item.

    Auto-generated PyBindGen registration: wraps the copy constructor
    and the GetTag/GetTypeId accessors of a single tag entry.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList (network module).

    Auto-generated PyBindGen registration: wraps the constructors and
    the Add/Head/Peek/Remove/RemoveAll/Replace tag-list operations.
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList::TagData.

    Auto-generated PyBindGen registration: exposes the constructors and
    the public data members (count, data, next, tid) of the tag-list
    node struct.
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for the SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation.

    Auto-generated PyBindGen registration: exposes the default and copy
    constructors and the static Cleanup() helper of the reference-counting
    base used by ns3::Object.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    copy_arg = param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')
    cls.add_constructor([copy_arg])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register ns3::Simulator (simulator.h, module 'core').

    Only the copy constructor is an instance member; every other entry point
    on ns3::Simulator is a static method, so they are registered from a
    single order-preserving table.
    """
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # (method name, return type, [(parameter type, parameter name), ...])
    static_methods = (
        ('Cancel', 'void', [('ns3::EventId const &', 'id')]),
        ('Destroy', 'void', []),
        ('GetContext', 'uint32_t', []),
        ('GetDelayLeft', 'ns3::Time', [('ns3::EventId const &', 'id')]),
        ('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', []),
        ('GetMaximumSimulationTime', 'ns3::Time', []),
        ('GetSystemId', 'uint32_t', []),
        ('IsExpired', 'bool', [('ns3::EventId const &', 'id')]),
        ('IsFinished', 'bool', []),
        ('Now', 'ns3::Time', []),
        ('Remove', 'void', [('ns3::EventId const &', 'id')]),
        ('SetImplementation', 'void', [('ns3::Ptr< ns3::SimulatorImpl >', 'impl')]),
        ('SetScheduler', 'void', [('ns3::ObjectFactory', 'schedulerFactory')]),
        ('Stop', 'void', []),
        ('Stop', 'void', [('ns3::Time const &', 'time')]),
    )
    for method_name, return_type, parameters in static_methods:
        cls.add_method(method_name, return_type,
                       [param(t, n) for t, n in parameters],
                       is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register ns3::Tag (tag.h, module 'network'): the abstract packet-tag
    base class whose serialization hooks are all pure virtual."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    # Pure-virtual (de)serialization and printing interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register ns3::TagBuffer (tag-buffer.h, module 'network'): a raw
    byte-range buffer with fixed-width read/write accessors."""
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    cls.add_method('Read', 'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # Fixed-width readers: no arguments, return the decoded value.
    for reader_name, reader_ret in (('ReadDouble', 'double'),
                                    ('ReadU16', 'uint16_t'),
                                    ('ReadU32', 'uint32_t'),
                                    ('ReadU64', 'uint64_t'),
                                    ('ReadU8', 'uint8_t')):
        cls.add_method(reader_name, reader_ret, [])
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    cls.add_method('Write', 'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Fixed-width writers: single value argument, return void.
    for writer_name, value_type, value_name in (
            ('WriteDouble', 'double', 'v'),
            ('WriteU16', 'uint16_t', 'data'),
            ('WriteU32', 'uint32_t', 'data'),
            ('WriteU64', 'uint64_t', 'v'),
            ('WriteU8', 'uint8_t', 'v')):
        cls.add_method(writer_name, 'void', [param(value_type, value_name)])
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register ns3::TimeWithUnit (nstime.h, module 'core'): stream output
    plus copy and (time, unit) constructors."""
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const', 'time'),
                         param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3Timer_methods(root_module, cls):
    """Register ns3::Timer (timer.h, module 'core'): constructors, the
    read-only state accessors, and the scheduling/control methods.
    Registration order matches the original generated sequence."""
    cls.add_constructor([param('ns3::Timer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
    cls.add_method('Cancel', 'void', [])
    # Const accessors onto the timer state, no arguments.
    for accessor_name, accessor_ret in (('GetDelay', 'ns3::Time'),
                                        ('GetDelayLeft', 'ns3::Time'),
                                        ('GetState', 'ns3::Timer::State'),
                                        ('IsExpired', 'bool'),
                                        ('IsRunning', 'bool'),
                                        ('IsSuspended', 'bool')):
        cls.add_method(accessor_name, accessor_ret, [], is_const=True)
    # Mutating control methods without arguments.
    for control_name in ('Remove', 'Resume', 'Schedule'):
        cls.add_method(control_name, 'void', [])
    cls.add_method('Schedule', 'void', [param('ns3::Time', 'delay')])
    cls.add_method('SetDelay', 'void', [param('ns3::Time const &', 'delay')])
    cls.add_method('Suspend', 'void', [])
    return
def register_Ns3TimerImpl_methods(root_module, cls):
    """Register ns3::TimerImpl (timer-impl.h, module 'core'): default/copy
    constructors plus the pure-virtual Invoke() and Schedule() hooks."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
    cls.add_method('Invoke', 'void', [],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('Schedule', 'ns3::EventId',
                   [param('ns3::Time const &', 'delay')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register ns3::TypeId (type-id.h, module 'core').

    Exposes the comparison/stream operators, constructors, the Attribute and
    TraceSource registration API, and the lookup/introspection accessors.
    This block is auto-generated; the registration order is kept exactly
    as emitted.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    # NOTE: the three-argument AddTraceSource overload is registered as deprecated.
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 
                   'ns3::TypeId::AttributeInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 
                   'std::size_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 
                   'ns3::TypeId::TraceSourceInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'hash')], 
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 
                   'bool', 
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], 
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 
                   'bool', 
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 
                   'ns3::TypeId', 
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register ns3::TypeId::AttributeInformation (type-id.h, module 'core'):
    default/copy constructors and the public descriptor fields."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public fields, registered in declaration order.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('flags', 'uint32_t'),
            ('help', 'std::string'),
            ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('name', 'std::string'),
            ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >')):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register ns3::TypeId::TraceSourceInformation (type-id.h, module
    'core'): default/copy constructors and the public descriptor fields."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public fields, registered in declaration order.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('callback', 'std::string'),
            ('help', 'std::string'),
            ('name', 'std::string')):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty (empty.h, module 'core'): default and copy
    constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register ns3::int64x64_t (int64x64-double.h, module 'core'):
    arithmetic/comparison operators, the scalar constructors, and accessors
    for the high/low 64-bit halves. Registration order is preserved."""
    value_type = root_module['ns3::int64x64_t']
    # Binary arithmetic, unary negation, then division (original order kept).
    for op in ('*', '+', '-'):
        cls.add_binary_numeric_operator(op, value_type, value_type,
                                        param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', value_type, value_type,
                                    param('ns3::int64x64_t const &', u'right'))
    for op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(op)
    for op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(op, param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    for op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(op)
    # Constructors: default, one per supported scalar type, (hi, lo) pair, copy.
    cls.add_constructor([])
    for scalar_type in ('double', 'long double', 'int', 'long int',
                        'long long int', 'unsigned int', 'long unsigned int',
                        'long long unsigned int'):
        cls.add_constructor([param(scalar_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors onto the fixed-point representation.
    for getter_name, getter_ret in (('GetDouble', 'double'),
                                    ('GetHigh', 'int64_t'),
                                    ('GetLow', 'uint64_t')):
        cls.add_method(getter_name, getter_ret, [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')],
                   is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const',
                             is_const=True)
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register ns3::Chunk (chunk.h, module 'network'): abstract base class
    whose Deserialize() and Print() hooks are pure virtual."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3DsdvHelper_methods(root_module, cls):
    """Register ns3::DsdvHelper (dsdv-helper.h, module 'dsdv'): constructors,
    the virtual Copy()/Create() factory methods, and attribute Set()."""
    cls.add_constructor([param('ns3::DsdvHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::DsdvHelper *', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'),
                    param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3Header_methods(root_module, cls):
    """Register ns3::Header (header.h, module 'network'): the abstract
    protocol-header base class with pure-virtual (de)serialization hooks."""
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Header (ipv4-header.h, module 'internet')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    cls.add_constructor([])
    # Header (de)serialization overrides.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Human-readable names for DSCP/ECN code points.
    cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    cls.add_method('EnableChecksum', 'void', [])
    # Field accessors.
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Flag / checksum predicates.
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Field mutators.
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')])
    cls.add_method('SetDontFragment', 'void', [])
    cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')])
    cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')])
    cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')])
    cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')])
    cls.add_method('SetLastFragment', 'void', [])
    cls.add_method('SetMayFragment', 'void', [])
    cls.add_method('SetMoreFragments', 'void', [])
    cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')])
    cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')])
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')])
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object (object.h, module 'core')."""
    # Public default constructor.
    cls.add_constructor([])
    # Aggregation / lifecycle API.
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    # Copy constructor is protected in the C++ class.
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    # Protected virtual hooks for subclasses.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Object::AggregateIterator (object.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # Iteration protocol: HasNext() / Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::RandomVariableStream (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Stream number and antithetic-variate configuration.
    cls.add_method('SetStream', 'void', [param('int64_t', 'stream')])
    cls.add_method('GetStream', 'int64_t', [], is_const=True)
    cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')])
    cls.add_method('IsAntithetic', 'bool', [], is_const=True)
    # Pure-virtual draw operations implemented by concrete distributions.
    cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    # Protected access to the underlying RNG stream.
    cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected')
    return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::SequentialRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Read-only access to the configured sequence parameters.
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True)
    cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True)
    # Virtual draw operations overriding RandomVariableStream.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeAccessor> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeChecker> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeValue> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::CallbackImplBase> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::EventImpl> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::Hash::Implementation> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::Ipv4MulticastRoute> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::Ipv4Route> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::NixVector> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::OutputStreamWrapper> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::Packet> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::TraceSourceAccessor> base instantiation."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Socket_methods(root_module, cls):
## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Socket const &', 'arg0')])
## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
cls.add_method('Bind',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
cls.add_method('Bind',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind6() [member function]
cls.add_method('Bind6',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
cls.add_method('BindToNetDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'netdevice')],
is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Close() [member function]
cls.add_method('Close',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
cls.add_method('Connect',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
cls.add_method('CreateSocket',
'ns3::Ptr< ns3::Socket >',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')],
is_static=True)
## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
cls.add_method('GetAllowBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
cls.add_method('GetBoundNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[])
## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
cls.add_method('GetErrno',
'ns3::Socket::SocketErrno',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function]
cls.add_method('GetIpTos',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function]
cls.add_method('GetIpTtl',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function]
cls.add_method('GetIpv6HopLimit',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function]
cls.add_method('GetIpv6Tclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
cls.add_method('GetRxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
cls.add_method('GetSockName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
cls.add_method('GetSocketType',
'ns3::Socket::SocketType',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
cls.add_method('GetTxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function]
cls.add_method('IsIpRecvTos',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function]
cls.add_method('IsIpRecvTtl',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function]
cls.add_method('IsIpv6RecvHopLimit',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function]
cls.add_method('IsIpv6RecvTclass',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function]
cls.add_method('IsRecvPktInfo',
'bool',
[],
is_const=True)
## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
cls.add_method('Listen',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[])
## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Recv',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
cls.add_method('SendTo',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
cls.add_method('SendTo',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
cls.add_method('SetAcceptCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
cls.add_method('SetAllowBroadcast',
'bool',
[param('bool', 'allowBroadcast')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
cls.add_method('SetCloseCallbacks',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
cls.add_method('SetConnectCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
cls.add_method('SetDataSentCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function]
cls.add_method('SetIpRecvTos',
'void',
[param('bool', 'ipv4RecvTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function]
cls.add_method('SetIpRecvTtl',
'void',
[param('bool', 'ipv4RecvTtl')])
## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function]
cls.add_method('SetIpTos',
'void',
[param('uint8_t', 'ipTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function]
cls.add_method('SetIpTtl',
'void',
[param('uint8_t', 'ipTtl')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function]
cls.add_method('SetIpv6HopLimit',
'void',
[param('uint8_t', 'ipHopLimit')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function]
cls.add_method('SetIpv6RecvHopLimit',
'void',
[param('bool', 'ipv6RecvHopLimit')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function]
cls.add_method('SetIpv6RecvTclass',
'void',
[param('bool', 'ipv6RecvTclass')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function]
cls.add_method('SetIpv6Tclass',
'void',
[param('int', 'ipTclass')])
## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
cls.add_method('SetRecvCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
cls.add_method('SetRecvPktInfo',
'void',
[param('bool', 'flag')])
## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
cls.add_method('ShutdownRecv',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
cls.add_method('ShutdownSend',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTos() const [member function]
cls.add_method('IsManualIpTos',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function]
cls.add_method('IsManualIpTtl',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function]
cls.add_method('IsManualIpv6HopLimit',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function]
cls.add_method('IsManualIpv6Tclass',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
cls.add_method('NotifyConnectionFailed',
'void',
[],
visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
cls.add_method('NotifyConnectionRequest',
'bool',
[param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
cls.add_method('NotifyConnectionSucceeded',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
cls.add_method('NotifyDataRecv',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
cls.add_method('NotifyDataSent',
'void',
[param('uint32_t', 'size')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
cls.add_method('NotifyErrorClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
cls.add_method('NotifyNewConnectionCreated',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
cls.add_method('NotifyNormalClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
cls.add_method('NotifySend',
'void',
[param('uint32_t', 'spaceAvailable')],
visibility='protected')
return
def register_Ns3SocketAddressTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketAddressTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('GetAddress', 'ns3::Address', [], dict(is_const=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
        ('SetAddress', 'void', [param('ns3::Address', 'addr')], {}),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3SocketIpTosTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketIpTosTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTos', 'uint8_t', [], dict(is_const=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
        ('SetTos', 'void', [param('uint8_t', 'tos')], {}),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketIpTtlTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTtl', 'uint8_t', [], dict(is_const=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
        ('SetTtl', 'void', [param('uint8_t', 'ttl')], {}),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketIpv6HopLimitTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('GetHopLimit', 'uint8_t', [], dict(is_const=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
        ('SetHopLimit', 'void', [param('uint8_t', 'hopLimit')], {}),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3SocketIpv6TclassTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketIpv6TclassTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTclass', 'uint8_t', [], dict(is_const=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
        ('SetTclass', 'void', [param('uint8_t', 'tclass')], {}),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketSetDontFragmentTag (socket.h, module 'network')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
    cls.add_constructor([])
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    tag_methods = [
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_virtual=True)),
        ('Disable', 'void', [], {}),
        ('Enable', 'void', [], {}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('IsEnabled', 'bool', [], dict(is_const=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'i')], dict(is_const=True, is_virtual=True)),
    ]
    for method_name, return_type, method_params, extra in tag_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::Time (nstime.h, module 'core').

    Registration order is kept identical to the generator's output.
    """
    time_type = root_module['ns3::Time']
    # Binary numeric operators: scaling by int64_t ('*', '/') and Time +/- Time.
    cls.add_binary_numeric_operator('*', time_type, time_type, param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', time_type, time_type, param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', time_type, time_type, param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', time_type, time_type, param('int64_t const &', u'right'))
    # Comparison, in-place and stream operators (two comparison groups, as generated).
    for comparison in ('<', '>', '!='):
        cls.add_binary_comparison_operator(comparison)
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    for comparison in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(comparison)
    # Constructors: default, copy, one per numeric type, int64x64_t and string.
    for ctor_params in ([],
                        [param('ns3::Time const &', 'o')],
                        [param('double', 'v')],
                        [param('int', 'v')],
                        [param('long int', 'v')],
                        [param('long long int', 'v')],
                        [param('unsigned int', 'v')],
                        [param('long unsigned int', 'v')],
                        [param('long long unsigned int', 'v')],
                        [param('ns3::int64x64_t const &', 'v')],
                        [param('std::string const &', 's')]):
        cls.add_constructor(ctor_params)
    # Each entry: (method name, return type, parameter list, extra add_method keywords).
    time_methods = [
        ('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], dict(is_const=True)),
        ('Compare', 'int', [param('ns3::Time const &', 'o')], dict(is_const=True)),
        ('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], dict(is_static=True)),
        ('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], dict(is_static=True)),
        ('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], dict(is_static=True)),
        ('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], dict(is_static=True)),
        ('GetDays', 'double', [], dict(is_const=True)),
        ('GetDouble', 'double', [], dict(is_const=True)),
        ('GetFemtoSeconds', 'int64_t', [], dict(is_const=True)),
        ('GetHours', 'double', [], dict(is_const=True)),
        ('GetInteger', 'int64_t', [], dict(is_const=True)),
        ('GetMicroSeconds', 'int64_t', [], dict(is_const=True)),
        ('GetMilliSeconds', 'int64_t', [], dict(is_const=True)),
        ('GetMinutes', 'double', [], dict(is_const=True)),
        ('GetNanoSeconds', 'int64_t', [], dict(is_const=True)),
        ('GetPicoSeconds', 'int64_t', [], dict(is_const=True)),
        ('GetResolution', 'ns3::Time::Unit', [], dict(is_static=True)),
        ('GetSeconds', 'double', [], dict(is_const=True)),
        ('GetTimeStep', 'int64_t', [], dict(is_const=True)),
        ('GetYears', 'double', [], dict(is_const=True)),
        ('IsNegative', 'bool', [], dict(is_const=True)),
        ('IsPositive', 'bool', [], dict(is_const=True)),
        ('IsStrictlyNegative', 'bool', [], dict(is_const=True)),
        ('IsStrictlyPositive', 'bool', [], dict(is_const=True)),
        ('IsZero', 'bool', [], dict(is_const=True)),
        ('Max', 'ns3::Time', [], dict(is_static=True)),
        ('Min', 'ns3::Time', [], dict(is_static=True)),
        ('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], dict(is_static=True)),
        ('StaticInit', 'bool', [], dict(is_static=True)),
        ('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], dict(is_const=True)),
        ('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], dict(is_const=True)),
        ('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], dict(is_const=True)),
    ]
    for method_name, return_type, method_params, extra in time_methods:
        cls.add_method(method_name, return_type, method_params, **extra)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # All four hooks are pure virtual const members returning bool; the
    # Connect/Disconnect pair additionally takes a context string.
    for method_name, with_context in (('Connect', True),
                                      ('ConnectWithoutContext', False),
                                      ('Disconnect', True),
                                      ('DisconnectWithoutContext', False)):
        method_params = [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)]
        if with_context:
            method_params.append(param('std::string', 'context'))
        method_params.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(method_name, 'bool', method_params,
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register Python bindings for ns3::Trailer (trailer.h, module 'network')."""
    cls.add_output_stream_operator()
    # Default constructor first, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # Deserialize is the only non-const pure virtual; GetTypeId is static.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::TriangularRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: double GetMean()/GetMin()/GetMax() const.
    for accessor in ('GetMean', 'GetMin', 'GetMax'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Draws taking explicit (mean, min, max) arguments.
    cls.add_method('GetValue', 'double',
                   [param('double', 'mean'), param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::UniformRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: double GetMin()/GetMax() const.
    for accessor in ('GetMin', 'GetMax'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Draws taking explicit (min, max) arguments.
    cls.add_method('GetValue', 'double',
                   [param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'min'), param('uint32_t', 'max')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::WeibullRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: double GetScale()/GetShape()/GetBound() const.
    for accessor in ('GetScale', 'GetShape', 'GetBound'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Draws taking explicit (scale, shape, bound) arguments.
    cls.add_method('GetValue', 'double',
                   [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::ZetaRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # double GetAlpha() const.
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Draws taking an explicit alpha argument.
    cls.add_method('GetValue', 'double', [param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::ZipfRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: uint32_t GetN() const and double GetAlpha() const.
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Draws taking explicit (n, alpha) arguments.
    cls.add_method('GetValue', 'double',
                   [param('uint32_t', 'n'), param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'n'), param('uint32_t', 'alpha')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::AttributeAccessor (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # bool Get(ObjectBase const * object, AttributeValue & attribute) const -- pure virtual.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Capability predicates: bool HasGetter()/HasSetter() const -- pure virtual.
    for predicate in ('HasGetter', 'HasSetter'):
        cls.add_method(predicate, 'bool', [],
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Set(ObjectBase * object, AttributeValue const & value) const -- pure virtual.
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::AttributeChecker (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # bool Check(AttributeValue const & value) const -- pure virtual.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Copy(AttributeValue const & source, AttributeValue & destination) const -- pure virtual.
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AttributeValue> Create() const -- pure virtual.
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AttributeValue> CreateValidValue(AttributeValue const & value) const.
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    # Type-description queries -- pure virtual, const.
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register bindings for ns3::AttributeValue (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<AttributeValue> Copy() const -- pure virtual.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker) -- pure virtual.
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const -- pure virtual.
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (callback.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (callback.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # bool IsEqual(Ptr<CallbackImplBase const> other) const -- pure virtual.
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (callback.h, module 'core')."""
    # Copy, default, and from-CallbackBase constructors.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Ptr<AttributeValue> Copy() const -- virtual override.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker) -- virtual override.
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const -- virtual override.
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(CallbackBase base).
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::ConstantRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # double GetConstant() const.
    cls.add_method('GetConstant', 'double', [], is_const=True)
    # Draws taking an explicit constant argument.
    cls.add_method('GetValue', 'double', [param('double', 'constant')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::DeterministicRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # void SetValueArray(double * values, uint64_t length).
    cls.add_method('SetValueArray', 'void',
                   [param('double *', 'values'), param('uint64_t', 'length')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::EmpiricalRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_constructor([])
    # void CDF(double v, double c).
    cls.add_method('CDF', 'void',
                   [param('double', 'v'), param('double', 'c')])
    # uint32_t GetInteger() -- virtual.
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # double GetValue() -- virtual.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    # double Interpolate(double, double, double, double, double) -- private virtual.
    cls.add_method('Interpolate', 'double',
                   [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
                   visibility='private', is_virtual=True)
    # void Validate() -- private virtual.
    cls.add_method('Validate', 'void', [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<AttributeValue> Copy() const -- private virtual override.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker) -- private virtual override.
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const -- private virtual override.
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::ErlangRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: uint32_t GetK() const and double GetLambda() const.
    cls.add_method('GetK', 'uint32_t', [], is_const=True)
    cls.add_method('GetLambda', 'double', [], is_const=True)
    # Draws taking explicit (k, lambda) arguments.
    cls.add_method('GetValue', 'double',
                   [param('uint32_t', 'k'), param('double', 'lambda')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register bindings for ns3::EventImpl (event-impl.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # Public zero-argument entry points: Cancel, Invoke, IsCancelled.
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # void Notify() -- protected pure virtual.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::ExponentialRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: double GetMean()/GetBound() const.
    for accessor in ('GetMean', 'GetBound'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Draws taking explicit (mean, bound) arguments.
    cls.add_method('GetValue', 'double',
                   [param('double', 'mean'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::GammaRandomVariable (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Accessors: double GetAlpha()/GetBeta() const.
    for accessor in ('GetAlpha', 'GetBeta'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Draws taking explicit (alpha, beta) arguments.
    cls.add_method('GetValue', 'double',
                   [param('double', 'alpha'), param('double', 'beta')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    # Zero-argument virtual draws.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3Ipv4_methods(root_module, cls):
    """Register bindings for the ns3::Ipv4 API (ipv4.h, module 'internet').

    Most methods are registered as pure virtual.
    """
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
    cls.add_constructor([])
    # Interface/address management.
    cls.add_method('AddAddress', 'bool',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('AddInterface', 'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')],
                   is_pure_virtual=True, is_virtual=True)
    # Raw socket lifecycle.
    cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('DeleteRawSocket', 'void',
                   [param('ns3::Ptr< ns3::Socket >', 'socket')],
                   is_pure_virtual=True, is_virtual=True)
    # Const queries over interfaces and addresses.
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetInterfaceForAddress', 'int32_t',
                   [param('ns3::Ipv4Address', 'address')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetInterfaceForDevice', 'int32_t',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetInterfaceForPrefix', 'int32_t',
                   [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMetric', 'uint16_t',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNAddresses', 'uint32_t',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNInterfaces', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >',
                   [param('int', 'protocolNumber')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Insert', 'void',
                   [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
                   is_pure_virtual=True, is_virtual=True)
    # Predicates.
    cls.add_method('IsDestinationAddress', 'bool',
                   [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsForwarding', 'bool',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsUp', 'bool',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # RemoveAddress overloads: by address index, then by address value.
    cls.add_method('RemoveAddress', 'bool',
                   [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('RemoveAddress', 'bool',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
                   is_pure_virtual=True, is_virtual=True)
    # Packet transmission entry points.
    cls.add_method('Send', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendWithHeader', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   is_pure_virtual=True, is_virtual=True)
    # Mutators.
    cls.add_method('SetDown', 'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetForwarding', 'void',
                   [param('uint32_t', 'interface'), param('bool', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMetric', 'void',
                   [param('uint32_t', 'interface'), param('uint16_t', 'metric')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetRoutingProtocol', 'void',
                   [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetUp', 'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    # Static const attribute IF_ANY.
    cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
    # Private pure-virtual getters and setters.
    cls.add_method('GetIpForward', 'bool', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetWeakEsModel', 'bool', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('SetIpForward', 'void',
                   [param('bool', 'forward')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('SetWeakEsModel', 'void',
                   [param('bool', 'model')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressChecker (ipv4-address.h, module 'network').

    Registers the default constructor and the copy constructor, in that order.
    """
    constructor_signatures = (
        [],                                                        # default constructor
        [param('ns3::Ipv4AddressChecker const &', 'arg0')],        # copy constructor
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressValue (ipv4-address.h, module 'network').

    Registers the default, copy and value constructors, then the
    AttributeValue interface (Copy / DeserializeFromString / Get /
    SerializeToString / Set), preserving the original registration order.
    """
    # Constructors: default, copy, and from an ns3::Ipv4Address value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # Method table: (name, return type, parameters, extra keyword flags).
    method_table = (
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::Ipv4Address', [],
         dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void',
         [param('ns3::Ipv4Address const &', 'value')],
         dict()),
    )
    for method_name, return_type, parameters, flags in method_table:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3Ipv4Interface_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Interface (ipv4-interface.h, module 'internet').

    Auto-generated pybindgen registration: constructors, address management,
    accessors/mutators for device, metric and state flags, Send, and the
    protected DoDispose override.
    """
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface::Ipv4Interface(ns3::Ipv4Interface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Interface const &', 'arg0')])
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface::Ipv4Interface() [constructor]
    cls.add_constructor([])
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::AddAddress(ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('AddAddress',
                   'bool',
                   [param('ns3::Ipv4InterfaceAddress', 'address')])
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::GetAddress(uint32_t index) const [member function]
    cls.add_method('GetAddress',
                   'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::ArpCache> ns3::Ipv4Interface::GetArpCache() const [member function]
    cls.add_method('GetArpCache',
                   'ns3::Ptr< ns3::ArpCache >',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Interface::GetDevice() const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): uint16_t ns3::Ipv4Interface::GetMetric() const [member function]
    cls.add_method('GetMetric',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): uint32_t ns3::Ipv4Interface::GetNAddresses() const [member function]
    cls.add_method('GetNAddresses',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): static ns3::TypeId ns3::Ipv4Interface::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsDown() const [member function]
    cls.add_method('IsDown',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsForwarding() const [member function]
    cls.add_method('IsForwarding',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsUp() const [member function]
    cls.add_method('IsUp',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::RemoveAddress(uint32_t index) [member function]
    cls.add_method('RemoveAddress',
                   'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'index')])
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::RemoveAddress(ns3::Ipv4Address address) [member function]
    cls.add_method('RemoveAddress',
                   'ns3::Ipv4InterfaceAddress',
                   [param('ns3::Ipv4Address', 'address')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::Send(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Address dest) [member function]
    cls.add_method('Send',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Address', 'dest')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetArpCache(ns3::Ptr<ns3::ArpCache> arpCache) [member function]
    cls.add_method('SetArpCache',
                   'void',
                   [param('ns3::Ptr< ns3::ArpCache >', 'arpCache')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('SetDevice',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDown() [member function]
    cls.add_method('SetDown',
                   'void',
                   [])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetForwarding(bool val) [member function]
    cls.add_method('SetForwarding',
                   'void',
                   [param('bool', 'val')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetMetric(uint16_t metric) [member function]
    cls.add_method('SetMetric',
                   'void',
                   [param('uint16_t', 'metric')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetUp() [member function]
    cls.add_method('SetUp',
                   'void',
                   [])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
    """Register bindings for ns3::Ipv4L3Protocol (ipv4-l3-protocol.h, module 'internet').

    Auto-generated pybindgen registration for the concrete IPv4 stack
    implementation: public interface/address management, raw sockets,
    Send/Receive, the PROT_NUMBER constant, protected lifecycle overrides
    (DoDispose, NotifyNewAggregate) and the private forwarding/weak-ES
    virtual hooks.
    """
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::Ipv4L3Protocol() [constructor]
    cls.add_constructor([])
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::AddAddress(uint32_t i, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('AddAddress',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddInterface',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4L3Protocol::CreateRawSocket() [member function]
    cls.add_method('CreateRawSocket',
                   'ns3::Ptr< ns3::Socket >',
                   [],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
    cls.add_method('DeleteRawSocket',
                   'void',
                   [param('ns3::Ptr< ns3::Socket >', 'socket')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4L3Protocol::GetAddress(uint32_t interfaceIndex, uint32_t addressIndex) const [member function]
    cls.add_method('GetAddress',
                   'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Interface> ns3::Ipv4L3Protocol::GetInterface(uint32_t i) const [member function]
    cls.add_method('GetInterface',
                   'ns3::Ptr< ns3::Ipv4Interface >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForAddress(ns3::Ipv4Address addr) const [member function]
    cls.add_method('GetInterfaceForAddress',
                   'int32_t',
                   [param('ns3::Ipv4Address', 'addr')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
    cls.add_method('GetInterfaceForDevice',
                   'int32_t',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForPrefix(ns3::Ipv4Address addr, ns3::Ipv4Mask mask) const [member function]
    cls.add_method('GetInterfaceForPrefix',
                   'int32_t',
                   [param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMetric(uint32_t i) const [member function]
    cls.add_method('GetMetric',
                   'uint16_t',
                   [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMtu(uint32_t i) const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNAddresses(uint32_t interface) const [member function]
    cls.add_method('GetNAddresses',
                   'uint32_t',
                   [param('uint32_t', 'interface')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNInterfaces() const [member function]
    cls.add_method('GetNInterfaces',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4L3Protocol::GetNetDevice(uint32_t i) [member function]
    cls.add_method('GetNetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4L3Protocol::GetProtocol(int protocolNumber) const [member function]
    cls.add_method('GetProtocol',
                   'ns3::Ptr< ns3::IpL4Protocol >',
                   [param('int', 'protocolNumber')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4L3Protocol::GetRoutingProtocol() const [member function]
    cls.add_method('GetRoutingProtocol',
                   'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L3Protocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
    cls.add_method('Insert',
                   'void',
                   [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
    cls.add_method('IsDestinationAddress',
                   'bool',
                   [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsForwarding(uint32_t i) const [member function]
    cls.add_method('IsForwarding',
                   'bool',
                   [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUnicast(ns3::Ipv4Address ad) const [member function]
    cls.add_method('IsUnicast',
                   'bool',
                   [param('ns3::Ipv4Address', 'ad')],
                   is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUp(uint32_t i) const [member function]
    cls.add_method('IsUp',
                   'bool',
                   [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Receive(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::Packet const> p, uint16_t protocol, ns3::Address const & from, ns3::Address const & to, ns3::NetDevice::PacketType packetType) [member function]
    cls.add_method('Receive',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
    cls.add_method('Remove',
                   'void',
                   [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')])
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interfaceIndex, uint32_t addressIndex) [member function]
    cls.add_method('RemoveAddress',
                   'bool',
                   [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function]
    cls.add_method('RemoveAddress',
                   'bool',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4L3Protocol::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
    cls.add_method('SelectSourceAddress',
                   'ns3::Ipv4Address',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('Send',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('SendWithHeader',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDefaultTtl(uint8_t ttl) [member function]
    cls.add_method('SetDefaultTtl',
                   'void',
                   [param('uint8_t', 'ttl')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDown(uint32_t i) [member function]
    cls.add_method('SetDown',
                   'void',
                   [param('uint32_t', 'i')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetForwarding(uint32_t i, bool val) [member function]
    cls.add_method('SetForwarding',
                   'void',
                   [param('uint32_t', 'i'), param('bool', 'val')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetMetric(uint32_t i, uint16_t metric) [member function]
    cls.add_method('SetMetric',
                   'void',
                   [param('uint32_t', 'i'), param('uint16_t', 'metric')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
    cls.add_method('SetRoutingProtocol',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetUp(uint32_t i) [member function]
    cls.add_method('SetUp',
                   'void',
                   [param('uint32_t', 'i')],
                   is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::PROT_NUMBER [variable]
    cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetIpForward() const [member function]
    cls.add_method('GetIpForward',
                   'bool',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetWeakEsModel() const [member function]
    cls.add_method('GetWeakEsModel',
                   'bool',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetIpForward(bool forward) [member function]
    cls.add_method('SetIpForward',
                   'void',
                   [param('bool', 'forward')],
                   visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetWeakEsModel(bool model) [member function]
    cls.add_method('SetWeakEsModel',
                   'void',
                   [param('bool', 'model')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskChecker (ipv4-address.h, module 'network').

    Registers the default constructor and the copy constructor, in that order.
    """
    constructor_signatures = (
        [],                                                     # default constructor
        [param('ns3::Ipv4MaskChecker const &', 'arg0')],        # copy constructor
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskValue (ipv4-address.h, module 'network').

    Registers the default, copy and value constructors, then the
    AttributeValue interface (Copy / DeserializeFromString / Get /
    SerializeToString / Set), preserving the original registration order.
    """
    # Constructors: default, copy, and from an ns3::Ipv4Mask value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # Method table: (name, return type, parameters, extra keyword flags).
    method_table = (
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::Ipv4Mask', [],
         dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void',
         [param('ns3::Ipv4Mask const &', 'value')],
         dict()),
    )
    for method_name, return_type, parameters, flags in method_table:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MulticastRoute (ipv4-route.h, module 'internet').

    Registers the copy and default constructors, const accessors, mutators,
    and the MAX_INTERFACES / MAX_TTL class constants, preserving the original
    registration order.
    """
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
    cls.add_constructor([])
    # Const accessors: group, origin, output-TTL map and parent interface.
    for accessor_name, return_type in (
        ('GetGroup', 'ns3::Ipv4Address'),
        ('GetOrigin', 'ns3::Ipv4Address'),
        ('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >'),
        ('GetParent', 'uint32_t'),
    ):
        cls.add_method(accessor_name, return_type, [], is_const=True)
    # Mutators: all return void.
    for mutator_name, parameters in (
        ('SetGroup', [param('ns3::Ipv4Address const', 'group')]),
        ('SetOrigin', [param('ns3::Ipv4Address const', 'origin')]),
        ('SetOutputTtl', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')]),
        ('SetParent', [param('uint32_t', 'iif')]),
    ):
        cls.add_method(mutator_name, 'void', parameters)
    # Class-level constants.
    cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
    cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
    return
def register_Ns3Ipv4Route_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Route (ipv4-route.h, module 'internet').

    Registers the output stream operator, the copy and default constructors,
    and the getter/setter pairs for destination, gateway, output device and
    source, preserving the original registration order.
    """
    # operator<< so a route can be printed.
    cls.add_output_stream_operator()
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
    cls.add_constructor([])
    # Const getters: one per route field.
    for getter_name, return_type in (
        ('GetDestination', 'ns3::Ipv4Address'),
        ('GetGateway', 'ns3::Ipv4Address'),
        ('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >'),
        ('GetSource', 'ns3::Ipv4Address'),
    ):
        cls.add_method(getter_name, return_type, [], is_const=True)
    # Setters: one per route field, all returning void.
    for setter_name, parameters in (
        ('SetDestination', [param('ns3::Ipv4Address', 'dest')]),
        ('SetGateway', [param('ns3::Ipv4Address', 'gw')]),
        ('SetOutputDevice', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')]),
        ('SetSource', [param('ns3::Ipv4Address', 'src')]),
    ):
        cls.add_method(setter_name, 'void', parameters)
    return
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
    """Register bindings for ns3::Ipv4RoutingProtocol (ipv4-routing-protocol.h, module 'internet').

    Auto-generated pybindgen registration for the abstract routing-protocol
    base class: all member functions except GetTypeId are registered as pure
    virtual (is_pure_virtual=True), so Python subclasses must override them.
    The RouteInput registration encodes the full ns3::Callback template
    expansions for the unicast/multicast/local-delivery/error callbacks.
    """
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
    ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
    cls.add_method('RouteInput',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput',
                   'ns3::Ptr< ns3::Ipv4Route >',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressChecker (ipv6-address.h, module 'network').

    Registers the default constructor and the copy constructor, in that order.
    """
    constructor_signatures = (
        [],                                                        # default constructor
        [param('ns3::Ipv6AddressChecker const &', 'arg0')],        # copy constructor
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressValue (ipv6-address.h, module 'network').

    Registers the default, copy and value constructors, then the
    AttributeValue interface (Copy / DeserializeFromString / Get /
    SerializeToString / Set), preserving the original registration order.
    """
    # Constructors: default, copy, and from an ns3::Ipv6Address value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # Method table: (name, return type, parameters, extra keyword flags).
    method_table = (
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::Ipv6Address', [],
         dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void',
         [param('ns3::Ipv6Address const &', 'value')],
         dict()),
    )
    for method_name, return_type, parameters, flags in method_table:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixChecker (ipv6-address.h, module 'network').

    Registers the default constructor and the copy constructor, in that order.
    """
    constructor_signatures = (
        [],                                                       # default constructor
        [param('ns3::Ipv6PrefixChecker const &', 'arg0')],        # copy constructor
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue (ipv6-address.h, module 'network') on *cls*.

    Generated API-scan output: each add_constructor/add_method call below
    mirrors the C++ declaration quoted in the ## comment above it, including
    const/virtual qualifiers. Do not hand-edit call order or type strings.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Prefix',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register ns3::LogNormalRandomVariable (random-variable-stream.h, module
    'core') on *cls*.

    Generated API-scan output; each call mirrors the C++ declaration quoted
    in the ## comment above it. Note GetValue/GetInteger are each registered
    twice: once with explicit (mu, sigma) parameters and once as the
    zero-argument virtual overload.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register ns3::NetDevice (net-device.h, module 'network') on *cls*.

    Generated API-scan output for the abstract NetDevice interface: nearly
    every method is registered with is_pure_virtual=True, matching the C++
    declarations quoted in the ## comments. Do not hand-edit the type
    strings — the callback signatures must match the C++ typedefs exactly.
    """
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector (nix-vector.h, module 'network') on *cls*.

    Generated API-scan output; also wires operator<< via
    add_output_stream_operator(). Each call mirrors the C++ declaration
    quoted in the ## comment above it.
    """
    # Expose C++ operator<<(std::ostream&, NixVector) as Python str()/print support.
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register ns3::Node (node.h, module 'network') on *cls*.

    Generated API-scan output; each call mirrors the C++ declaration quoted
    in the ## comment above it. DoDispose/DoInitialize are registered with
    protected visibility so Python subclasses can override them.
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register ns3::NormalRandomVariable (random-variable-stream.h, module
    'core') on *cls*.

    Generated API-scan output; mirrors the quoted C++ declarations. Includes
    the static INFINITE_VALUE attribute, which is also the default for the
    'bound' parameter of GetValue(mean, variance, bound).
    """
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker (object-factory.h, module 'core') on *cls*.

    Exposes the default constructor and the copy constructor
    ns3::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0).
    """
    # Default constructor first, then the copy constructor — same order
    # the API scan emitted them.
    for ctor_params in ([], [param('ns3::ObjectFactoryChecker const &', 'arg0')]):
        cls.add_constructor(ctor_params)
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue (object-factory.h, module 'core') on *cls*.

    Generated API-scan output: the standard AttributeValue surface —
    constructors, Copy, (De)Serialize and typed Get/Set — mirroring the
    quoted C++ declarations.
    """
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register ns3::OutputStreamWrapper (output-stream-wrapper.h, module
    'network') on *cls*.

    Generated API-scan output: copy constructor, a (filename, openmode)
    constructor, an std::ostream* constructor, and GetStream().
    """
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream',
                   'std::ostream *',
                   [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register ns3::Packet (packet.h, module 'network') on *cls*.

    Generated API-scan output covering the full Packet surface: buffer
    construction, header/trailer add/peek/remove, byte and packet tags,
    fragmentation, (de)serialization and printing. Each call mirrors the
    C++ declaration quoted in the ## comment above it; operator<< is wired
    via add_output_stream_operator().
    """
    # Expose C++ operator<<(std::ostream&, Packet) as Python str()/print support.
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<ns3::Packet const> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString',
                   'std::string',
                   [],
                   is_const=True)
    return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    """Register ns3::ParetoRandomVariable (random-variable-stream.h, module
    'core') on *cls*.

    Generated API-scan output; mirrors the quoted C++ declarations.
    GetValue/GetInteger are each registered twice: the (mean, shape, bound)
    overload and the zero-argument virtual overload.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Bind the ns3::TimeValue attribute-value wrapper (nstime.h, 'core' module)."""
    checker_t = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Default, copy, and value-initializing constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # ns3::AttributeValue virtual interface plus typed Get/Set.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_t, 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_t, 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Bind the ns3::TypeIdChecker constructors (type-id.h, 'core' module)."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Bind the ns3::TypeIdValue attribute-value wrapper (type-id.h, 'core' module)."""
    checker_t = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Default, copy, and value-initializing constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # ns3::AttributeValue virtual interface plus typed Get/Set.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_t, 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_t, 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Bind the ns3::AddressChecker constructors (address.h, 'network' module)."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Bind the ns3::AddressValue attribute-value wrapper (address.h, 'network' module)."""
    checker_t = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Default, copy, and value-initializing constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Address const &', 'value')])
    # ns3::AttributeValue virtual interface plus typed Get/Set.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_t, 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_t, 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return
def register_Ns3Ipv4ListRouting_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4ListRouting (ipv4-list-routing.h, 'internet' module).

    Covers constructors, the routing-protocol list management API, the
    ns3::Ipv4RoutingProtocol virtual interface (Notify*/Route*/PrintRoutingTable/SetIpv4),
    and the protected DoDispose/DoInitialize lifecycle hooks.
    """
    ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting(ns3::Ipv4ListRouting const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4ListRouting const &', 'arg0')])
    ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting() [constructor]
    cls.add_constructor([])
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::AddRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol, int16_t priority) [member function]
    cls.add_method('AddRoutingProtocol',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol'), param('int16_t', 'priority')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): uint32_t ns3::Ipv4ListRouting::GetNRoutingProtocols() const [member function]
    cls.add_method('GetNRoutingProtocols',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function]
    # NOTE: direction=2 marks 'priority' as an out-parameter for the wrapper generator.
    cls.add_method('GetRoutingProtocol',
                   'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
                   [param('uint32_t', 'index'), param('int16_t &', 'priority', direction=2)],
                   is_const=True, is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4ListRouting::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True, is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): bool ns3::Ipv4ListRouting::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
    # The four Callback parameters are the unicast-forward (ucb), multicast-forward (mcb),
    # local-deliver (lcb) and error (ecb) callbacks of the Ipv4RoutingProtocol interface.
    cls.add_method('RouteInput',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4ListRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput',
                   'ns3::Ptr< ns3::Ipv4Route >',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
                   is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Bind the abstract ns3::Hash::Implementation base class (hash-function.h, 'core' module)."""
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    cls.add_constructor([])
    # Fresh param objects per call; (buffer, size) is the common hashing signature.
    buf_params = lambda: [param('char const *', 'buffer'), param('size_t const', 'size')]
    # GetHash32 and clear are pure virtual; GetHash64 has a default implementation.
    cls.add_method('GetHash32', 'uint32_t', buf_params(),
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', buf_params(),
                   is_virtual=True)
    cls.add_method('clear', 'void', [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Bind the ns3::Hash::Function::Fnv1a hash implementation (hash-fnv.h, 'core' module)."""
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # Both hash widths share the (buffer, size) signature.
    for method_name, return_type in (('GetHash32', 'uint32_t'), ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'), param('size_t const', 'size')],
                       is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Bind the ns3::Hash::Function::Hash32 function-pointer adapter (hash-function.h, 'core' module)."""
    # Copy constructor, then the constructor taking a 32-bit hash function pointer.
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Bind the ns3::Hash::Function::Hash64 function-pointer adapter (hash-function.h, 'core' module)."""
    # Copy constructor, then the constructor taking a 64-bit hash function pointer.
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    # Both hash widths share the (buffer, size) signature.
    for method_name, return_type in (('GetHash32', 'uint32_t'), ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'), param('size_t const', 'size')],
                       is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Bind the ns3::Hash::Function::Murmur3 hash implementation (hash-murmur3.h, 'core' module)."""
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    # Both hash widths share the (buffer, size) signature.
    for method_name, return_type in (('GetHash32', 'uint32_t'), ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'), param('size_t const', 'size')],
                       is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3DsdvDsdvHeader_methods(root_module, cls):
    """Bind the ns3::dsdv::DsdvHeader packet header (dsdv-packet.h, 'dsdv' module)."""
    cls.add_output_stream_operator()
    # Copy constructor, then the defaulted (dst, hopcount, dstSeqNo) constructor.
    cls.add_constructor([param('ns3::dsdv::DsdvHeader const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint32_t', 'hopcount', default_value='0'), param('uint32_t', 'dstSeqNo', default_value='0')])
    # ns3::Header virtual serialization interface.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    # Const field accessors.
    cls.add_method('GetDst', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetDstSeqno', 'uint32_t', [], is_const=True)
    cls.add_method('GetHopCount', 'uint32_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    # Field setters.
    cls.add_method('SetDst', 'void', [param('ns3::Ipv4Address', 'destination')])
    cls.add_method('SetDstSeqno', 'void', [param('uint32_t', 'sequenceNumber')])
    cls.add_method('SetHopCount', 'void', [param('uint32_t', 'hopCount')])
    return
def register_Ns3DsdvPacketQueue_methods(root_module, cls):
    """Bind the ns3::dsdv::PacketQueue buffer class (dsdv-packet-queue.h, 'dsdv' module)."""
    cls.add_constructor([param('ns3::dsdv::PacketQueue const &', 'arg0')])
    cls.add_constructor([])
    # Queue manipulation by destination address.
    cls.add_method('Dequeue', 'bool',
                   [param('ns3::Ipv4Address', 'dst'), param('ns3::dsdv::QueueEntry &', 'entry')])
    cls.add_method('DropPacketWithDst', 'void', [param('ns3::Ipv4Address', 'dst')])
    cls.add_method('Enqueue', 'bool', [param('ns3::dsdv::QueueEntry &', 'entry')])
    cls.add_method('Find', 'bool', [param('ns3::Ipv4Address', 'dst')])
    cls.add_method('GetCountForPacketsWithDst', 'uint32_t', [param('ns3::Ipv4Address', 'dst')])
    # Const accessors for the queue's configured limits.
    cls.add_method('GetMaxPacketsPerDst', 'uint32_t', [], is_const=True)
    cls.add_method('GetMaxQueueLen', 'uint32_t', [], is_const=True)
    cls.add_method('GetQueueTimeout', 'ns3::Time', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [])
    # Matching setters.
    cls.add_method('SetMaxPacketsPerDst', 'void', [param('uint32_t', 'len')])
    cls.add_method('SetMaxQueueLen', 'void', [param('uint32_t', 'len')])
    cls.add_method('SetQueueTimeout', 'void', [param('ns3::Time', 't')])
    return
def register_Ns3DsdvQueueEntry_methods(root_module, cls):
    """Register Python bindings for ns3::dsdv::QueueEntry (dsdv-packet-queue.h, 'dsdv' module).

    Covers the equality operator, constructors (including the fully-defaulted one
    carrying packet, IPv4 header, unicast-forward and error callbacks), and the
    getter/setter pairs for packet, header, expiry time and the two callbacks.
    """
    cls.add_binary_comparison_operator('==')
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::QueueEntry::QueueEntry(ns3::dsdv::QueueEntry const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dsdv::QueueEntry const &', 'arg0')])
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::QueueEntry::QueueEntry(ns3::Ptr<ns3::Packet const> pa=0, ns3::Ipv4Header const & h=ns3::Ipv4Header(), ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb=ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>(), ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb=ns3::Callback<void, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>()) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'pa', default_value='0'), param('ns3::Ipv4Header const &', 'h', default_value='ns3::Ipv4Header()'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb', default_value='ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>()'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb', default_value='ns3::Callback<void, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>()')])
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::dsdv::QueueEntry::GetErrorCallback() const [member function]
    cls.add_method('GetErrorCallback',
                   'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::Time ns3::dsdv::QueueEntry::GetExpireTime() const [member function]
    cls.add_method('GetExpireTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::Ipv4Header ns3::dsdv::QueueEntry::GetIpv4Header() const [member function]
    cls.add_method('GetIpv4Header',
                   'ns3::Ipv4Header',
                   [],
                   is_const=True)
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::Ptr<ns3::Packet const> ns3::dsdv::QueueEntry::GetPacket() const [member function]
    cls.add_method('GetPacket',
                   'ns3::Ptr< ns3::Packet const >',
                   [],
                   is_const=True)
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::dsdv::QueueEntry::GetUnicastForwardCallback() const [member function]
    cls.add_method('GetUnicastForwardCallback',
                   'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## dsdv-packet-queue.h (module 'dsdv'): void ns3::dsdv::QueueEntry::SetErrorCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
    cls.add_method('SetErrorCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')])
    ## dsdv-packet-queue.h (module 'dsdv'): void ns3::dsdv::QueueEntry::SetExpireTime(ns3::Time exp) [member function]
    cls.add_method('SetExpireTime',
                   'void',
                   [param('ns3::Time', 'exp')])
    ## dsdv-packet-queue.h (module 'dsdv'): void ns3::dsdv::QueueEntry::SetIpv4Header(ns3::Ipv4Header h) [member function]
    cls.add_method('SetIpv4Header',
                   'void',
                   [param('ns3::Ipv4Header', 'h')])
    ## dsdv-packet-queue.h (module 'dsdv'): void ns3::dsdv::QueueEntry::SetPacket(ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('SetPacket',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## dsdv-packet-queue.h (module 'dsdv'): void ns3::dsdv::QueueEntry::SetUnicastForwardCallback(ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb) [member function]
    cls.add_method('SetUnicastForwardCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb')])
    return
def register_Ns3DsdvRoutingProtocol_methods(root_module, cls):
    """Register Python bindings for ns3::dsdv::RoutingProtocol (dsdv-routing-protocol.h, 'dsdv' module).

    Covers constructors, AssignStreams, the DSDV feature-flag getters/setters
    (buffering, route aggregation, weighted settling time), the full
    ns3::Ipv4RoutingProtocol virtual interface, and the DSDV_PORT constant.
    """
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::dsdv::RoutingProtocol::RoutingProtocol(ns3::dsdv::RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dsdv::RoutingProtocol const &', 'arg0')])
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::dsdv::RoutingProtocol::RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## dsdv-routing-protocol.h (module 'dsdv'): int64_t ns3::dsdv::RoutingProtocol::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): bool ns3::dsdv::RoutingProtocol::GetEnableBufferFlag() const [member function]
    cls.add_method('GetEnableBufferFlag',
                   'bool',
                   [],
                   is_const=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): bool ns3::dsdv::RoutingProtocol::GetEnableRAFlag() const [member function]
    cls.add_method('GetEnableRAFlag',
                   'bool',
                   [],
                   is_const=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): static ns3::TypeId ns3::dsdv::RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): bool ns3::dsdv::RoutingProtocol::GetWSTFlag() const [member function]
    cls.add_method('GetWSTFlag',
                   'bool',
                   [],
                   is_const=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True, is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): bool ns3::dsdv::RoutingProtocol::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
    # The four Callback parameters are the unicast-forward (ucb), multicast-forward (mcb),
    # local-deliver (lcb) and error (ecb) callbacks of the Ipv4RoutingProtocol interface.
    cls.add_method('RouteInput',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::Ptr<ns3::Ipv4Route> ns3::dsdv::RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput',
                   'ns3::Ptr< ns3::Ipv4Route >',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::SetEnableBufferFlag(bool f) [member function]
    cls.add_method('SetEnableBufferFlag',
                   'void',
                   [param('bool', 'f')])
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::SetEnableRAFlag(bool f) [member function]
    cls.add_method('SetEnableRAFlag',
                   'void',
                   [param('bool', 'f')])
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
                   is_virtual=True)
    ## dsdv-routing-protocol.h (module 'dsdv'): void ns3::dsdv::RoutingProtocol::SetWSTFlag(bool f) [member function]
    cls.add_method('SetWSTFlag',
                   'void',
                   [param('bool', 'f')])
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::dsdv::RoutingProtocol::DSDV_PORT [variable]
    cls.add_static_attribute('DSDV_PORT', 'uint32_t const', is_const=True)
    return
def register_Ns3DsdvRoutingTable_methods(root_module, cls):
    """Register Python bindings for ns3::dsdv::RoutingTable (dsdv-rtable.h).

    Auto-generated pybindgen registrations: each ``cls.add_constructor`` /
    ``cls.add_method`` call mirrors exactly one C++ constructor or member
    function of the wrapped class, as recorded in the ``##`` comment that
    precedes it.  ``cls`` is the pybindgen CppClass wrapper created earlier
    in register_types(); do not edit by hand.
    """
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTable::RoutingTable(ns3::dsdv::RoutingTable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dsdv::RoutingTable const &', 'arg0')])
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTable::RoutingTable() [constructor]
    cls.add_constructor([])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::AddIpv4Event(ns3::Ipv4Address address, ns3::EventId id) [member function]
    cls.add_method('AddIpv4Event',
                   'bool',
                   [param('ns3::Ipv4Address', 'address'), param('ns3::EventId', 'id')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::AddRoute(ns3::dsdv::RoutingTableEntry & r) [member function]
    cls.add_method('AddRoute',
                   'bool',
                   [param('ns3::dsdv::RoutingTableEntry &', 'r')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::AnyRunningEvent(ns3::Ipv4Address address) [member function]
    cls.add_method('AnyRunningEvent',
                   'bool',
                   [param('ns3::Ipv4Address', 'address')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::DeleteAllRoutesFromInterface(ns3::Ipv4InterfaceAddress iface) [member function]
    cls.add_method('DeleteAllRoutesFromInterface',
                   'void',
                   [param('ns3::Ipv4InterfaceAddress', 'iface')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::DeleteIpv4Event(ns3::Ipv4Address address) [member function]
    cls.add_method('DeleteIpv4Event',
                   'bool',
                   [param('ns3::Ipv4Address', 'address')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::DeleteRoute(ns3::Ipv4Address dst) [member function]
    cls.add_method('DeleteRoute',
                   'bool',
                   [param('ns3::Ipv4Address', 'dst')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::ForceDeleteIpv4Event(ns3::Ipv4Address address) [member function]
    cls.add_method('ForceDeleteIpv4Event',
                   'bool',
                   [param('ns3::Ipv4Address', 'address')])
    ## dsdv-rtable.h (module 'dsdv'): ns3::EventId ns3::dsdv::RoutingTable::GetEventId(ns3::Ipv4Address address) [member function]
    cls.add_method('GetEventId',
                   'ns3::EventId',
                   [param('ns3::Ipv4Address', 'address')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::GetListOfAllRoutes(std::map<ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry, std::less<ns3::Ipv4Address>, std::allocator<std::pair<ns3::Ipv4Address const, ns3::dsdv::RoutingTableEntry> > > & allRoutes) [member function]
    cls.add_method('GetListOfAllRoutes',
                   'void',
                   [param('std::map< ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry > &', 'allRoutes')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::GetListOfDestinationWithNextHop(ns3::Ipv4Address nxtHp, std::map<ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry, std::less<ns3::Ipv4Address>, std::allocator<std::pair<ns3::Ipv4Address const, ns3::dsdv::RoutingTableEntry> > > & dstList) [member function]
    cls.add_method('GetListOfDestinationWithNextHop',
                   'void',
                   [param('ns3::Ipv4Address', 'nxtHp'), param('std::map< ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry > &', 'dstList')])
    ## dsdv-rtable.h (module 'dsdv'): ns3::Time ns3::dsdv::RoutingTable::Getholddowntime() const [member function]
    cls.add_method('Getholddowntime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::LookupRoute(ns3::Ipv4Address dst, ns3::dsdv::RoutingTableEntry & rt) [member function]
    cls.add_method('LookupRoute',
                   'bool',
                   [param('ns3::Ipv4Address', 'dst'), param('ns3::dsdv::RoutingTableEntry &', 'rt')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::LookupRoute(ns3::Ipv4Address id, ns3::dsdv::RoutingTableEntry & rt, bool forRouteInput) [member function]
    cls.add_method('LookupRoute',
                   'bool',
                   [param('ns3::Ipv4Address', 'id'), param('ns3::dsdv::RoutingTableEntry &', 'rt'), param('bool', 'forRouteInput')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::Print(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::Purge(std::map<ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry, std::less<ns3::Ipv4Address>, std::allocator<std::pair<ns3::Ipv4Address const, ns3::dsdv::RoutingTableEntry> > > & removedAddresses) [member function]
    cls.add_method('Purge',
                   'void',
                   [param('std::map< ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry > &', 'removedAddresses')])
    ## dsdv-rtable.h (module 'dsdv'): uint32_t ns3::dsdv::RoutingTable::RoutingTableSize() [member function]
    cls.add_method('RoutingTableSize',
                   'uint32_t',
                   [])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTable::Setholddowntime(ns3::Time t) [member function]
    cls.add_method('Setholddowntime',
                   'void',
                   [param('ns3::Time', 't')])
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTable::Update(ns3::dsdv::RoutingTableEntry & rt) [member function]
    cls.add_method('Update',
                   'bool',
                   [param('ns3::dsdv::RoutingTableEntry &', 'rt')])
    return
def register_Ns3DsdvRoutingTableEntry_methods(root_module, cls):
    """Register Python bindings for ns3::dsdv::RoutingTableEntry (dsdv-rtable.h).

    Auto-generated pybindgen registrations for the two constructors and the
    getter/setter pairs of the DSDV routing-table-entry record.  Each call
    mirrors one C++ member as documented by the ``##`` comment above it;
    do not edit by hand.
    """
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTableEntry::RoutingTableEntry(ns3::dsdv::RoutingTableEntry const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dsdv::RoutingTableEntry const &', 'arg0')])
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTableEntry::RoutingTableEntry(ns3::Ptr<ns3::NetDevice> dev=0, ns3::Ipv4Address dst=ns3::Ipv4Address(), uint32_t m_seqNo=0, ns3::Ipv4InterfaceAddress iface=ns3::Ipv4InterfaceAddress(), uint32_t hops=0, ns3::Ipv4Address nextHop=ns3::Ipv4Address(), ns3::Time lifetime=ns3::Simulator::Now( ), ns3::Time SettlingTime=ns3::Simulator::Now( ), bool changedEntries=false) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint32_t', 'm_seqNo', default_value='0'), param('ns3::Ipv4InterfaceAddress', 'iface', default_value='ns3::Ipv4InterfaceAddress()'), param('uint32_t', 'hops', default_value='0'), param('ns3::Ipv4Address', 'nextHop', default_value='ns3::Ipv4Address()'), param('ns3::Time', 'lifetime', default_value='ns3::Simulator::Now( )'), param('ns3::Time', 'SettlingTime', default_value='ns3::Simulator::Now( )'), param('bool', 'changedEntries', default_value='false')])
    ## dsdv-rtable.h (module 'dsdv'): ns3::Ipv4Address ns3::dsdv::RoutingTableEntry::GetDestination() const [member function]
    cls.add_method('GetDestination',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): bool ns3::dsdv::RoutingTableEntry::GetEntriesChanged() const [member function]
    cls.add_method('GetEntriesChanged',
                   'bool',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RouteFlags ns3::dsdv::RoutingTableEntry::GetFlag() const [member function]
    cls.add_method('GetFlag',
                   'ns3::dsdv::RouteFlags',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): uint32_t ns3::dsdv::RoutingTableEntry::GetHop() const [member function]
    cls.add_method('GetHop',
                   'uint32_t',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Ipv4InterfaceAddress ns3::dsdv::RoutingTableEntry::GetInterface() const [member function]
    cls.add_method('GetInterface',
                   'ns3::Ipv4InterfaceAddress',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Time ns3::dsdv::RoutingTableEntry::GetLifeTime() const [member function]
    cls.add_method('GetLifeTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Ipv4Address ns3::dsdv::RoutingTableEntry::GetNextHop() const [member function]
    cls.add_method('GetNextHop',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Ptr<ns3::NetDevice> ns3::dsdv::RoutingTableEntry::GetOutputDevice() const [member function]
    cls.add_method('GetOutputDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Ptr<ns3::Ipv4Route> ns3::dsdv::RoutingTableEntry::GetRoute() const [member function]
    cls.add_method('GetRoute',
                   'ns3::Ptr< ns3::Ipv4Route >',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): uint32_t ns3::dsdv::RoutingTableEntry::GetSeqNo() const [member function]
    cls.add_method('GetSeqNo',
                   'uint32_t',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): ns3::Time ns3::dsdv::RoutingTableEntry::GetSettlingTime() const [member function]
    cls.add_method('GetSettlingTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::Print(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetEntriesChanged(bool entriesChanged) [member function]
    cls.add_method('SetEntriesChanged',
                   'void',
                   [param('bool', 'entriesChanged')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetFlag(ns3::dsdv::RouteFlags flag) [member function]
    cls.add_method('SetFlag',
                   'void',
                   [param('ns3::dsdv::RouteFlags', 'flag')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetHop(uint32_t hopCount) [member function]
    cls.add_method('SetHop',
                   'void',
                   [param('uint32_t', 'hopCount')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetInterface(ns3::Ipv4InterfaceAddress iface) [member function]
    cls.add_method('SetInterface',
                   'void',
                   [param('ns3::Ipv4InterfaceAddress', 'iface')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetLifeTime(ns3::Time lifeTime) [member function]
    cls.add_method('SetLifeTime',
                   'void',
                   [param('ns3::Time', 'lifeTime')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetNextHop(ns3::Ipv4Address nextHop) [member function]
    cls.add_method('SetNextHop',
                   'void',
                   [param('ns3::Ipv4Address', 'nextHop')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetOutputDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('SetOutputDevice',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetRoute(ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('SetRoute',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4Route >', 'route')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetSeqNo(uint32_t sequenceNumber) [member function]
    cls.add_method('SetSeqNo',
                   'void',
                   [param('uint32_t', 'sequenceNumber')])
    ## dsdv-rtable.h (module 'dsdv'): void ns3::dsdv::RoutingTableEntry::SetSettlingTime(ns3::Time settlingTime) [member function]
    cls.add_method('SetSettlingTime',
                   'void',
                   [param('ns3::Time', 'settlingTime')])
    return
def register_functions(root_module):
    """Dispatch free-function registration to every nested C++ namespace.

    For each sub-namespace of ns3 that the generator scanned, hand its
    pybindgen submodule (plus the root module) to the matching registrar.
    """
    for submodule_name, registrar in (
            ('FatalImpl', register_functions_ns3_FatalImpl),
            ('Hash', register_functions_ns3_Hash),
            ('dsdv', register_functions_ns3_dsdv),
    ):
        registrar(root_module.get_submodule(submodule_name), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to bind in the ns3::FatalImpl namespace."""
    return None
def register_functions_ns3_Hash(module, root_module):
    """Recurse into ns3::Hash::Function; Hash itself exposes no free functions."""
    function_submodule = module.get_submodule('Function')
    register_functions_ns3_Hash_Function(function_submodule, root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to bind in the ns3::Hash::Function namespace."""
    return None
def register_functions_ns3_dsdv(module, root_module):
    """No free functions to bind in the ns3::dsdv namespace."""
    return None
def main():
    """Build the dsdv module description and write the generated C++ binding code to stdout."""
    root = module_init()
    # Registration order matters: types must exist before methods/functions
    # that reference them are attached.
    register_types(root)
    register_methods(root)
    register_functions(root)
    sink = FileCodeSink(sys.stdout)
    root.generate(sink)
# Script entry point: emit the generated binding source when executed directly.
if __name__ == '__main__':
    main()
| 63.645936 | 934 | 0.618019 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.dsdv', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
'ns.network')
le['ns3::Address'], import_from_module='ns.network')
uteConstructionList'])
='ns.network')
work', outer_class=root_module['ns3::Buffer'])
='ns.network')
ter_class=root_module['ns3::ByteTagIterator'])
twork')
_class=root_module['ns3::ByteTagList'])
_module['ns3::ByteTagList::Iterator'])
e='ns.core')
e='ns.core')
odule='ns.core')
le['ns3::Address'])
)
ule['ns3::Address'])
ore', template_parameters=['0'])
IntToType< 0 >'], import_from_module='ns.core')
ore', template_parameters=['1'])
IntToType< 1 >'], import_from_module='ns.core')
ore', template_parameters=['2'])
IntToType< 2 >'], import_from_module='ns.core')
ore', template_parameters=['3'])
IntToType< 3 >'], import_from_module='ns.core')
ore', template_parameters=['4'])
IntToType< 4 >'], import_from_module='ns.core')
ore', template_parameters=['5'])
IntToType< 5 >'], import_from_module='ns.core')
ore', template_parameters=['6'])
IntToType< 6 >'], import_from_module='ns.core')
etwork')
oot_module['ns3::Address'])
terfaceAddress'], import_from_module='ns.internet')
etwork')
rom_module='ns.internet')
etwork')
oot_module['ns3::Address'])
etwork')
work')
ue, import_from_module='ns.core')
le='ns.core')
core')
ork')
=root_module['ns3::PacketMetadata'])
ule['ns3::PacketMetadata::Item'], import_from_module='ns.network')
s=root_module['ns3::PacketMetadata'])
='ns.network')
r_class=root_module['ns3::PacketTagIterator'])
ork')
s=root_module['ns3::PacketTagList'])
:TagData'], import_from_module='ns.network')
eters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ity='private', import_from_module='ns.core')
ule='ns.network', parent=root_module['ns3::ObjectBase'])
.network')
ule='ns.core')
dule='ns.core')
E_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
ENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
rue, import_from_module='ns.core')
le='ns.core')
CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
re', outer_class=root_module['ns3::TypeId'])
re', outer_class=root_module['ns3::TypeId'])
dule='ns.core')
ore')
uter_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
e='ns.network', parent=root_module['ns3::ObjectBase'])
ns3::Ipv4RoutingHelper'])
='ns.network', parent=root_module['ns3::Chunk'])
nternet', parent=root_module['ns3::Header'])
SCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
ule='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
core', outer_class=root_module['ns3::Object'])
arent=root_module['ns3::Object'])
arent=root_module['ns3::RandomVariableStream'])
'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
, 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
e', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
, 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ntImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
s3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
Vector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
='ns.network', parent=root_module['ns3::Object'])
, 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
PACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
='ns.network', parent=root_module['ns3::Tag'])
='ns.network', parent=root_module['ns3::Tag'])
='ns.network', parent=root_module['ns3::Tag'])
='ns.network', parent=root_module['ns3::Tag'])
='ns.network', parent=root_module['ns3::Tag'])
='ns.network', parent=root_module['ns3::Tag'])
ule='ns.core')
'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
erts_to(root_module['ns3::int64x64_t'])
parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
'ns.network', parent=root_module['ns3::Chunk'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
e='ns.core', parent=root_module['ns3::AttributeChecker'])
e='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
e='ns.core', parent=root_module['ns3::AttributeValue'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
='ns.core', parent=root_module['ns3::AttributeValue'])
arent=root_module['ns3::RandomVariableStream'])
'ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
arent=root_module['ns3::RandomVariableStream'])
arent=root_module['ns3::RandomVariableStream'])
e='ns.internet', parent=root_module['ns3::Object'])
etwork', parent=root_module['ns3::AttributeChecker'])
etwork', parent=root_module['ns3::AttributeValue'])
rnet', parent=root_module['ns3::Object'])
et', parent=root_module['ns3::Ipv4'])
KSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
etwork', parent=root_module['ns3::AttributeChecker'])
etwork', parent=root_module['ns3::AttributeValue'])
internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
parent=root_module['ns3::Object'])
etwork', parent=root_module['ns3::AttributeChecker'])
etwork', parent=root_module['ns3::AttributeValue'])
etwork', parent=root_module['ns3::AttributeChecker'])
etwork', parent=root_module['ns3::AttributeValue'])
arent=root_module['ns3::RandomVariableStream'])
.network', parent=root_module['ns3::Object'])
ET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
le='ns.network', parent=root_module['ns3::Object'])
arent=root_module['ns3::RandomVariableStream'])
core', parent=root_module['ns3::AttributeChecker'])
core', parent=root_module['ns3::AttributeValue'])
parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
arent=root_module['ns3::RandomVariableStream'])
ule='ns.core', parent=root_module['ns3::AttributeValue'])
le='ns.core', parent=root_module['ns3::AttributeChecker'])
le='ns.core', parent=root_module['ns3::AttributeValue'])
'ns.network', parent=root_module['ns3::AttributeChecker'])
'ns.network', parent=root_module['ns3::AttributeValue'])
t', parent=root_module['ns3::Ipv4RoutingProtocol'])
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
talImpl')
register_types_ns3_FatalImpl(nested_module)
e('Hash')
register_types_ns3_Hash(nested_module)
e('dsdv')
register_types_ns3_dsdv(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
, parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
unction')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
ent=root_module['ns3::Hash::Implementation'])
oot_module['ns3::Hash::Implementation'])
oot_module['ns3::Hash::Implementation'])
root_module['ns3::Hash::Implementation'])
def register_types_ns3_dsdv(module):
root_module = module.get_root()
)
'ns3::Ipv4Address', 'ns3::dsdv::RoutingTableEntry'), container_type=u'map')
def register_methods(root_module):
    """Register the Python method wrappers for every C++ type bound by
    this PyBindGen-generated ns-3 ``dsdv`` module.

    Delegates to one ``register_*_methods`` helper per wrapped type,
    passing the root module and the type handle previously created
    during type registration (looked up by its fully qualified C++
    name).  Auto-generated code — do not edit by hand.

    :param root_module: the top-level PyBindGen module object holding
        all registered type handles.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3DsdvHelper_methods(root_module, root_module['ns3::DsdvHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
    register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    register_Ns3DsdvDsdvHeader_methods(root_module, root_module['ns3::dsdv::DsdvHeader'])
    register_Ns3DsdvPacketQueue_methods(root_module, root_module['ns3::dsdv::PacketQueue'])
    register_Ns3DsdvQueueEntry_methods(root_module, root_module['ns3::dsdv::QueueEntry'])
    register_Ns3DsdvRoutingProtocol_methods(root_module, root_module['ns3::dsdv::RoutingProtocol'])
    register_Ns3DsdvRoutingTable_methods(root_module, root_module['ns3::dsdv::RoutingTable'])
    register_Ns3DsdvRoutingTableEntry_methods(root_module, root_module['ns3::dsdv::RoutingTableEntry'])
    return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
param('uint8_t', 'len')])
am('uint8_t', 'len')],
is_const=True)
), param('uint8_t', 'len')])
buffer')],
is_const=True)
er', 'buffer')])
is_const=True)
is_const=True)
is_const=True)
'type')],
is_const=True)
is_static=True)
buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
2_t', 'length')],
is_const=True)
is_const=True)
), param('uint32_t', 'size')])
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
int32_t', 'end')])
t32_t', 'start')])
'uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
is_const=True)
is_const=True)
is_const=True)
elta')])
elta')])
uint32_t', 'size')])
uint32_t', 'size')])
uint32_t', 'size')])
:Iterator', 'end')])
data')])
data')])
data')])
data')])
data')])
data')])
data')])
data')])
data')])
data')])
'uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
register_Ns3ByteTagIteratorItem_methods(root_module, cls):
=True)
is_const=True)
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
et')])
ffsetEnd')],
is_const=True)
eturn
def register_Ns3ByteTagListIterator_methods(root_module, cls):
yteTagListIteratorItem_methods(root_module, cls):
cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
return
def register_Ns3Hasher_methods(root_module, cls):
'), param('size_t const', 'size')])
::string const', 's')])
[])
return
def register_Ns3Inet6SocketAddress_methods(root_module, cls):
is_static=True)
is_const=True)
,
is_const=True)
,
is_const=True)
,
is_const=True)
ess')])
ess')])
return
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
c=True)
')],
is_const=True)
Address', 'b')],
is_const=True)
')],
is_const=True)
'mask')])
return
def register_Ns3Ipv4RoutingHelper_methods(root_module, cls):
is_static=True)
is_static=True)
', 'stream')],
is_static=True)
', 'stream')],
is_static=True)
is_static=True)
is_static=True)
', 'stream')],
is_static=True)
', 'stream')],
is_static=True)
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
ue)
,
is_const=True)
is_const=True)
is_static=True)
is_static=True)
is_static=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
,
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_static=True)
is_const=True)
is_const=True)
'prefix')],
is_static=True)
'prefix')],
is_static=True)
'prefix')],
is_static=True)
is_static=True)
is_static=True)
is_static=True)
is_static=True)
is_static=True)
,
is_const=True)
,
is_const=True)
ess')])
ess')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
is_const=True)
],
is_const=True)
return
def register_Ns3NodeContainer_methods(root_module, cls):
is_pure_virtual=True, is_const=True, is_virtual=True)
is_static=True)
lue const &', 'value')])
lue const &', 'value')])
'ns3::CallbackBase const &', 'cb')])
ckBase const &', 'cb')])
'ns3::CallbackBase const &', 'cb')])
ckBase const &', 'cb')])
tributes')],
visibility='protected')
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
tory_methods(root_module, cls):
cls.add_output_stream_operator()
3PacketMetadata_methods(root_module, cls):
d')],
is_const=True)
t', 'size')])
is_static=True)
is_static=True)
is_const=True)
is_const=True)
)
)
t', 'size')])
t', 'size')])
ize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
ethods(root_module, cls):
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
is_static=True)
is_static=True)
'id')],
is_static=True)
is_static=True)
is_static=True)
'id')],
is_static=True)
'impl')],
is_static=True)
tory')],
is_static=True)
is_static=True)
time')],
is_static=True)
return
def register_Ns3Tag_methods(root_module, cls):
is_pure_virtual=True, is_virtual=True)
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
[],
is_static=True)
stream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
, 'trim')])
m('uint32_t', 'size')])
le', 'v')])
, 'data')])
, 'data')])
_t', 'v')])
_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
s):
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[])
[])
[])
'ns3::Time', 'delay')])
me const &', 'delay')])
[])
return
def register_Ns3TimerImpl_methods(root_module, cls):
lay')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
const >', 'accessor'), param('std::string', 'callback')])
t32_t', 'i')],
is_const=True)
t32_t', 'i')],
is_const=True)
],
is_const=True)
[],
is_const=True)
],
is_const=True)
],
is_const=True)
],
is_const=True)
],
is_const=True)
32_t', 'i')],
is_static=True)
,
is_static=True)
],
is_const=True)
t32_t', 'i')],
is_const=True)
],
is_const=True)
],
is_const=True)
],
is_const=True)
],
is_const=True)
[])
d', 'other')],
is_const=True)
eInformation *', 'info', transfer_ownership=False)],
is_const=True)
t', 'hash')],
is_static=True)
'ns3::TypeId *', 'tid')],
is_static=True)
g', 'name')],
is_static=True)
ring', 'name')],
is_const=True)
],
is_const=True)
eValue const >', 'initialValue')])
ing', 'groupName')])
3::TypeId', 'tid')])
::size_t', 'size')])
'uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
ion_methods(root_module, cls):
['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
)
return
def register_Ns3DsdvHelper_methods(root_module, cls):
is_const=True, is_virtual=True)
lue const &', 'value')])
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_static=True)
&', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
, 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Ipv4Header_methods(root_module, cls):
],
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True, is_virtual=True)
is_const=True)
is_const=True)
is_const=True, is_virtual=True)
is_const=True)
is_const=True)
is_const=True)
is_static=True)
is_const=True)
is_const=True)
is_const=True)
],
is_const=True, is_virtual=True)
],
is_const=True, is_virtual=True)
tion')])
dscp')])
'ecn')])
ytes')])
tion')])
size')])
'num')])
urce')])
'tos')])
'ttl')])
return
def register_Ns3Object_methods(root_module, cls):
[])
[],
is_const=True)
[],
is_const=True, is_virtual=True)
],
is_static=True)
[])
sibility='protected')
[],
visibility='protected', is_virtual=True)
[],
visibility='protected', is_virtual=True)
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
True, is_virtual=True)
st=True, visibility='protected')
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
rue)
f register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
'flags')])
param('ns3::Address const &', 'toAddress')],
is_pure_virtual=True, is_virtual=True)
, 'flags'), param('ns3::Address const &', 'address')])
ns3::empty >', 'newConnectionCreated')])
allowBroadcast')],
is_pure_virtual=True, is_virtual=True)
:empty, ns3::empty >', 'errorClose')])
, ns3::empty >', 'connectionFailed')])
mpty >', 'dataSent')])
, 'ipv4RecvTos')])
, 'ipv4RecvTtl')])
nt8_t', 'ipTos')])
nt8_t', 'ipTtl')],
is_virtual=True)
', 'ipHopLimit')],
is_virtual=True)
v6RecvHopLimit')])
ipv6RecvTclass')])
nt', 'ipTclass')])
3::empty >', 'arg0')])
('bool', 'flag')])
:empty >', 'sendCb')])
[],
is_pure_virtual=True, is_virtual=True)
[],
is_pure_virtual=True, is_virtual=True)
[],
visibility='protected', is_virtual=True)
is_const=True, visibility='protected')
is_const=True, visibility='protected')
is_const=True, visibility='protected')
is_const=True, visibility='protected')
[],
visibility='protected')
onst &', 'from')],
visibility='protected')
[],
visibility='protected')
[],
visibility='protected')
nt32_t', 'size')],
visibility='protected')
[],
visibility='protected')
ns3::Address const &', 'from')],
visibility='protected')
[],
visibility='protected')
spaceAvailable')],
visibility='protected')
return
def register_Ns3SocketAddressTag_methods(root_module, cls):
=True, is_virtual=True)
is_const=True, is_virtual=True)
is_static=True)
],
is_const=True, is_virtual=True)
],
is_const=True, is_virtual=True)
addr')])
return
def register_Ns3SocketIpTosTag_methods(root_module, cls):
True)
is_const=True, is_virtual=True)
is_const=True)
is_static=True)
')],
is_const=True, is_virtual=True)
')],
is_const=True, is_virtual=True)
, 'tos')])
return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
True)
is_const=True, is_virtual=True)
is_const=True)
is_static=True)
')],
is_const=True, is_virtual=True)
')],
is_const=True, is_virtual=True)
, 'ttl')])
return
def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls):
s_const=True, is_virtual=True)
is_static=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
)])
return
def register_Ns3SocketIpv6TclassTag_methods(root_module, cls):
e, is_virtual=True)
is_const=True)
is_static=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
s')])
return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
is_const=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
'ns3::Time::Unit', 'unit')],
is_static=True)
'ns3::Time::Unit', 'unit')],
is_static=True)
'ns3::Time::Unit', 'unit')],
is_static=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_static=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_static=True)
[],
is_static=True)
'resolution')],
is_static=True)
[],
is_static=True)
:Unit', 'unit')],
is_const=True)
:Unit', 'unit')],
is_const=True)
:Unit', 'unit')],
is_const=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
ackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
tual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_static=True)
', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
rue)
=True)
return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
c=True)
return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
c=True)
virtual=True)
s_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
atic=True)
egister_Ns3ZipfRandomVariable_methods(root_module, cls):
atic=True)
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
s_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
s3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
')],
is_const=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
er')],
is_pure_virtual=True, is_virtual=True)
er')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
lue_methods(root_module, cls):
cker')],
is_const=True, is_virtual=True)
', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
=True)
omVariable_methods(root_module, cls):
)
t_module, cls):
rg4')],
visibility='private', is_virtual=True)
ibility='private', is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
ibility='private', is_virtual=True)
,
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
ic=True)
True)
return
def register_Ns3EventImpl_methods(root_module, cls):
ity='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
ue)
andomVariable_methods(root_module, cls):
tic=True)
virtual=True)
return
def register_Ns3Ipv4_methods(root_module, cls):
is_pure_virtual=True, is_virtual=True)
tDevice >', 'device')],
is_pure_virtual=True, is_virtual=True)
[],
is_pure_virtual=True, is_virtual=True)
:Socket >', 'socket')],
is_pure_virtual=True, is_virtual=True)
int32_t', 'addressIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
t >', 'device')],
is_pure_virtual=True, is_const=True, is_virtual=True)
('ns3::Ipv4Mask', 'mask')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
nt32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
rotocolNumber')],
is_pure_virtual=True, is_const=True, is_virtual=True)
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
],
is_static=True)
tocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
param('uint32_t', 'iif')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
ram('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_virtual=True)
('ns3::Ipv4Address', 'address')],
is_pure_virtual=True, is_virtual=True)
ceAddress::InterfaceAddressScope_e', 'scope')],
is_pure_virtual=True, is_virtual=True)
am('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
terface'), param('bool', 'val')],
is_pure_virtual=True, is_virtual=True)
'), param('uint16_t', 'metric')],
is_pure_virtual=True, is_virtual=True)
', 'routingProtocol')],
is_pure_virtual=True, is_virtual=True)
32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
is_const=True)
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
('bool', 'forward')],
is_pure_virtual=True, visibility='private', is_virtual=True)
am('bool', 'model')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
er_Ns3Ipv4Interface_methods(root_module, cls):
nst=True)
is_const=True)
is_static=True)
is_const=True)
is_const=True)
is_const=True)
])
])
ress', 'dest')])
')])
')])
])
')])
visibility='protected', is_virtual=True)
return
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
is_virtual=True)
is_virtual=True)
,
is_virtual=True)
x')],
is_const=True, is_virtual=True)
is_const=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
k')],
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
,
is_virtual=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_static=True)
,
is_virtual=True)
f')],
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_const=True)
is_const=True, is_virtual=True)
&', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
)
ssIndex')],
is_virtual=True)
address')],
is_virtual=True)
dressScope_e', 'scope')],
is_virtual=True)
l'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_virtual=True)
3::Ipv4Route >', 'route')],
is_virtual=True)
is_virtual=True)
', 'val')],
is_virtual=True)
'metric')],
is_virtual=True)
)
,
is_virtual=True)
is_virtual=True)
visibility='protected', is_virtual=True)
visibility='protected', is_virtual=True)
is_const=True, visibility='private', is_virtual=True)
is_const=True, visibility='private', is_virtual=True)
visibility='private', is_virtual=True)
visibility='private', is_virtual=True)
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
ual=True)
e')])
return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
ut_stream_operator()
_const=True)
is_const=True)
'dest')])
', 'gw')])
utDevice')])
, 'src')])
return
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
ure_virtual=True, is_virtual=True)
,
is_pure_virtual=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_pure_virtual=True, is_virtual=True)
t::SocketErrno &', 'sockerr')],
is_pure_virtual=True, is_virtual=True)
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
er_Ns3Ipv6PrefixChecker_methods(root_module, cls):
rn
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
True)
gister_Ns3NetDevice_methods(root_module, cls):
e)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
p')],
is_pure_virtual=True, is_const=True, is_virtual=True)
r')],
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_static=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
address')],
is_pure_virtual=True, is_virtual=True)
'index')],
is_pure_virtual=True, is_virtual=True)
', 'mtu')],
is_pure_virtual=True, is_virtual=True)
>', 'node')],
is_pure_virtual=True, is_virtual=True)
mpty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
pty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
is_const=True)
m('uint32_t', 'size')])
rOfBits')])
is_const=True)
_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
is_static=True)
t32_t', 'index')],
is_const=True)
t32_t', 'index')],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_const=True)
[],
is_static=True)
empty, ns3::empty >', 'listener')])
< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
empty, ns3::empty >', 'listener')])
y, ns3::empty >', 'handler')])
[],
visibility='protected', is_virtual=True)
[],
visibility='protected', is_virtual=True)
return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
_t', 'bound')])
is_virtual=True)
is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
Wrapper_methods(root_module, cls):
railer')])
is_const=True)
],
is_const=True)
am('uint32_t', 'size')],
is_const=True)
am('uint32_t', 'size')],
is_const=True)
am('uint32_t', 'length')],
is_const=True)
is_static=True)
is_static=True)
&', 'tag')],
is_const=True)
is_const=True)
],
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
'header')],
is_const=True)
&', 'tag')],
is_const=True)
r &', 'trailer')])
&', 'os')],
is_const=True)
&', 'os')],
is_const=True)
&', 'os')],
is_const=True)
[])
[])
nt32_t', 'size')])
nt32_t', 'size')])
er &', 'header')])
::Tag &', 'tag')])
r &', 'trailer')])
::Tag &', 'tag')])
'uint32_t', 'maxSize')],
is_const=True)
r >', 'nixVector')])
is_const=True)
return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
ic=True)
is_virtual=True)
is_virtual=True)
return
def register_Ns3TimeValue_methods(root_module, cls):
],
is_virtual=True)
is_const=True)
, 'checker')],
is_const=True, is_virtual=True)
nst &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
e)
is_const=True)
checker')],
is_const=True, is_virtual=True)
&', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
ue)
ker')],
is_const=True, is_virtual=True)
'value')])
return
def register_Ns3Ipv4ListRouting_methods(root_module, cls):
is_const=True, is_virtual=True)
s_static=True)
dress')],
is_virtual=True)
is_virtual=True)
is_virtual=True)
dress')],
is_virtual=True)
is_const=True, is_virtual=True)
st &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_virtual=True)
3::Socket::SocketErrno &', 'sockerr')],
is_virtual=True)
is_virtual=True)
visibility='protected', is_virtual=True)
visibility='protected', is_virtual=True)
return
def register_Ns3HashImplementation_methods(root_module, cls):
_virtual=True)
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
is_virtual=True)
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
def register_Ns3HashFunctionHash64_methods(root_module, cls):
irtual=True)
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
is_virtual=True)
return
def register_Ns3DsdvDsdvHeader_methods(root_module, cls):
cls.add_output_stream_operator()
],
is_virtual=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
is_static=True)
is_const=True, is_virtual=True)
is_const=True, is_virtual=True)
on')])
er')])
nt')])
return
def register_Ns3DsdvPacketQueue_methods(root_module, cls):
def register_Ns3DsdvQueueEntry_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
pty, ns3::empty>()')])
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
b')])
])
ucb')])
return
def register_Ns3DsdvRoutingProtocol_methods(root_module, cls):
rtual=True)
is_virtual=True)
)],
is_virtual=True)
is_const=True, is_virtual=True)
ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_virtual=True)
ket::SocketErrno &', 'sockerr')],
is_virtual=True)
is_virtual=True)
ister_Ns3DsdvRoutingTable_methods(root_module, cls):
'changedEntries', default_value='false')])
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
is_const=True)
)
)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_dsdv(module.get_submodule('dsdv'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_dsdv(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| true | true |
1c4729d25dbb86b38c2d886e908b0f130e069aaf | 325 | py | Python | prev_exams_and_ans/2018ccc/q3_are_we_there_yet/are_we_there_yet.py | Yueleng/ccc_cco_material | e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0 | [
"MIT"
] | null | null | null | prev_exams_and_ans/2018ccc/q3_are_we_there_yet/are_we_there_yet.py | Yueleng/ccc_cco_material | e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0 | [
"MIT"
] | null | null | null | prev_exams_and_ans/2018ccc/q3_are_we_there_yet/are_we_there_yet.py | Yueleng/ccc_cco_material | e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
distance = [int(s) for s in input().split()]
for i in range(0, len(distance) + 1):
for j in range(0, len(distance) + 1):
# note that sum([]) = 0 which satifies our expectation.
print(sum(distance[i:j]) if i < j else sum(distance[j:i]), " ", end='')
print()
| 29.545455 | 79 | 0.590769 | import sys
input = sys.stdin.readline
distance = [int(s) for s in input().split()]
for i in range(0, len(distance) + 1):
for j in range(0, len(distance) + 1):
print(sum(distance[i:j]) if i < j else sum(distance[j:i]), " ", end='')
print()
| true | true |
1c4729da299e51dced35f7771e1d6476c0413115 | 2,072 | py | Python | src/opnsense/scripts/systemhealth/fetchData.py | Kipjr/core | 37a1b761d7ee8b0f02fc1daa02dc913e96e04737 | [
"BSD-2-Clause"
] | 2,109 | 2015-01-02T15:42:12.000Z | 2022-03-31T20:16:10.000Z | src/opnsense/scripts/systemhealth/fetchData.py | Kipjr/core | 37a1b761d7ee8b0f02fc1daa02dc913e96e04737 | [
"BSD-2-Clause"
] | 5,452 | 2015-01-03T10:30:09.000Z | 2022-03-31T19:58:33.000Z | src/opnsense/scripts/systemhealth/fetchData.py | Kipjr/core | 37a1b761d7ee8b0f02fc1daa02dc913e96e04737 | [
"BSD-2-Clause"
] | 834 | 2015-01-04T05:29:59.000Z | 2022-03-30T00:45:49.000Z | #!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
fetch xmldata from rrd tool, but only if filename is valid (with or without .rrd extension)
"""
import sys
import glob
import tempfile
import subprocess
import os.path
rrd_reports_dir = '/var/db/rrd'
if len(sys.argv) > 1:
filename = sys.argv[1]
# suffix rrd if not already in request
if filename.split('.')[-1] != 'rrd':
filename += '.rrd'
# scan rrd directory for requested file
for rrdFilename in glob.glob('%s/*.rrd' % rrd_reports_dir):
if os.path.basename(rrdFilename) == filename:
subprocess.run(['/usr/local/bin/rrdtool', 'dump', rrdFilename])
break
| 41.44 | 95 | 0.693533 |
import sys
import glob
import tempfile
import subprocess
import os.path
rrd_reports_dir = '/var/db/rrd'
if len(sys.argv) > 1:
filename = sys.argv[1]
if filename.split('.')[-1] != 'rrd':
filename += '.rrd'
for rrdFilename in glob.glob('%s/*.rrd' % rrd_reports_dir):
if os.path.basename(rrdFilename) == filename:
subprocess.run(['/usr/local/bin/rrdtool', 'dump', rrdFilename])
break
| true | true |
1c4729ed525f7921854f955b75cff5d38378f20c | 747 | py | Python | 1_languages/python/src/misc/spiral.py | praisetompane/3_programming | dd3e2e89a36a613d895fdbdd9c03845cb648fddf | [
"MIT"
] | null | null | null | 1_languages/python/src/misc/spiral.py | praisetompane/3_programming | dd3e2e89a36a613d895fdbdd9c03845cb648fddf | [
"MIT"
] | null | null | null | 1_languages/python/src/misc/spiral.py | praisetompane/3_programming | dd3e2e89a36a613d895fdbdd9c03845cb648fddf | [
"MIT"
] | null | null | null | import turtle
#note: the horizontal and vertical distance between the sides is constant
#hence (distance + 2)
def spiral(turtle, counter, sides, distance, angle):
if counter == sides: return
else:
turtle.forward(distance)
turtle.right(angle)
spiral(turtle, counter + 1, sides, distance + 2 , angle)
def main():
wn = turtle.Screen()
tess = turtle.Turtle()
tess.color('blue')
tess.right(90)
tess.forward(1)
distance = 1
sides = 50
spiral(tess, 0, sides, distance, 90)
tess.penup()
tess.goto(sides * 3 ,0)
tess.pendown()
tess.left(180)
spiral(tess, 0, sides, distance, 91)
#spiralSkewed(tess, 1, upperBound, distance, 90)
wn.exitonclick()
main()
| 22.636364 | 74 | 0.626506 | import turtle
def spiral(turtle, counter, sides, distance, angle):
if counter == sides: return
else:
turtle.forward(distance)
turtle.right(angle)
spiral(turtle, counter + 1, sides, distance + 2 , angle)
def main():
wn = turtle.Screen()
tess = turtle.Turtle()
tess.color('blue')
tess.right(90)
tess.forward(1)
distance = 1
sides = 50
spiral(tess, 0, sides, distance, 90)
tess.penup()
tess.goto(sides * 3 ,0)
tess.pendown()
tess.left(180)
spiral(tess, 0, sides, distance, 91)
wn.exitonclick()
main()
| true | true |
1c472a0eb3fa196b00b8c513884f73e6218c1cd3 | 545 | py | Python | var/spack/repos/builtin/packages/py-pydot2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-pydot2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/py-pydot2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyPydot2(PythonPackage):
"""Python interface to Graphviz's Dot"""
pypi = "pydot2/pydot2-1.0.33.tar.gz"
version('1.0.33', sha256='02c0e681a1c437077e2bb2522fb81fa322e53ba7002cfda8b894db0392a1bc9b')
depends_on('py-setuptools', type='build')
depends_on('py-pyparsing', type=('build', 'run'))
| 30.277778 | 96 | 0.73578 |
from spack.package import *
class PyPydot2(PythonPackage):
pypi = "pydot2/pydot2-1.0.33.tar.gz"
version('1.0.33', sha256='02c0e681a1c437077e2bb2522fb81fa322e53ba7002cfda8b894db0392a1bc9b')
depends_on('py-setuptools', type='build')
depends_on('py-pyparsing', type=('build', 'run'))
| true | true |
1c472b7b277dd66b68dcd06802f358e5dac36e8a | 166 | py | Python | randconv/image/test/testpalindrome.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | 1 | 2016-08-01T08:09:28.000Z | 2016-08-01T08:09:28.000Z | randconv/image/test/testpalindrome.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | null | null | null | randconv/image/test/testpalindrome.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test
"""
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__version__ = 'dev'
import nose
| 12.769231 | 53 | 0.650602 |
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__version__ = 'dev'
import nose
| true | true |
1c472b91e69c4fdfd62585e15e4bdb9819785323 | 5,705 | py | Python | tests/support/cptestcase.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | tests/support/cptestcase.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | tests/support/cptestcase.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Sylvain Hellegouarch
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Sylvain Hellegouarch nor the names of his contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Modified from the original. See the Git history of this file for details.
# https://bitbucket.org/Lawouach/cherrypy-recipes/src/50aff88dc4e24206518ec32e1c32af043f2729da/testing/unit/serverless/cptestcase.py
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import 3rd-party libs
# pylint: disable=import-error
import cherrypy # pylint: disable=3rd-party-module-not-gated
import salt.utils.stringutils
from salt.ext import six
from salt.ext.six import BytesIO
# Import Salt Testing libs
from tests.support.case import TestCase
# pylint: enable=import-error
# Not strictly speaking mandatory but just makes sense
cherrypy.config.update({"environment": "test_suite"})
# This is mandatory so that the HTTP server isn't started
# if you need to actually start (why would you?), simply
# subscribe it back.
cherrypy.server.unsubscribe()
# simulate fake socket address... they are irrelevant in our context
local = cherrypy.lib.httputil.Host("127.0.0.1", 50000, "")
remote = cherrypy.lib.httputil.Host("127.0.0.1", 50001, "")
__all__ = ["BaseCherryPyTestCase"]
class BaseCherryPyTestCase(TestCase):
def request(
self,
path="/",
method="GET",
app_path="",
scheme="http",
proto="HTTP/1.1",
body=None,
qs=None,
headers=None,
**kwargs
):
"""
CherryPy does not have a facility for serverless unit testing.
However this recipe demonstrates a way of doing it by
calling its internal API to simulate an incoming request.
This will exercise the whole stack from there.
Remember a couple of things:
* CherryPy is multithreaded. The response you will get
from this method is a thread-data object attached to
the current thread. Unless you use many threads from
within a unit test, you can mostly forget
about the thread data aspect of the response.
* Responses are dispatched to a mounted application's
page handler, if found. This is the reason why you
must indicate which app you are targeting with
this request by specifying its mount point.
You can simulate various request settings by setting
the `headers` parameter to a dictionary of headers,
the request's `scheme` or `protocol`.
.. seealso: http://docs.cherrypy.org/stable/refman/_cprequest.html#cherrypy._cprequest.Response
"""
# This is a required header when running HTTP/1.1
h = {"Host": "127.0.0.1"}
# if we had some data passed as the request entity
# let's make sure we have the content-length set
fd = None
if body is not None:
h["content-length"] = "{0}".format(len(body))
fd = BytesIO(salt.utils.stringutils.to_bytes(body))
if headers is not None:
h.update(headers)
# Get our application and run the request against it
app = cherrypy.tree.apps.get(app_path)
if not app:
# XXX: perhaps not the best exception to raise?
raise AssertionError("No application mounted at '{0}'".format(app_path))
# Cleanup any previous returned response
# between calls to this method
app.release_serving()
# Let's fake the local and remote addresses
request, response = app.get_serving(local, remote, scheme, proto)
try:
h = [(k, v) for k, v in six.iteritems(h)]
response = request.run(method, path, qs, proto, h, fd)
finally:
if fd:
fd.close()
fd = None
if response.output_status.startswith(b"500"):
response_body = response.collapse_body()
if six.PY3:
response_body = response_body.decode(__salt_system_encoding__)
print(response_body)
raise AssertionError("Unexpected error")
# collapse the response into a bytestring
response.collapse_body()
return request, response
| 39.895105 | 132 | 0.684663 |
from __future__ import absolute_import, print_function, unicode_literals
import cherrypy
import salt.utils.stringutils
from salt.ext import six
from salt.ext.six import BytesIO
from tests.support.case import TestCase
cherrypy.config.update({"environment": "test_suite"})
# if you need to actually start (why would you?), simply
# subscribe it back.
cherrypy.server.unsubscribe()
# simulate fake socket address... they are irrelevant in our context
local = cherrypy.lib.httputil.Host("127.0.0.1", 50000, "")
remote = cherrypy.lib.httputil.Host("127.0.0.1", 50001, "")
__all__ = ["BaseCherryPyTestCase"]
class BaseCherryPyTestCase(TestCase):
def request(
self,
path="/",
method="GET",
app_path="",
scheme="http",
proto="HTTP/1.1",
body=None,
qs=None,
headers=None,
**kwargs
):
# This is a required header when running HTTP/1.1
h = {"Host": "127.0.0.1"}
# if we had some data passed as the request entity
# let's make sure we have the content-length set
fd = None
if body is not None:
h["content-length"] = "{0}".format(len(body))
fd = BytesIO(salt.utils.stringutils.to_bytes(body))
if headers is not None:
h.update(headers)
app = cherrypy.tree.apps.get(app_path)
if not app:
raise AssertionError("No application mounted at '{0}'".format(app_path))
app.release_serving()
request, response = app.get_serving(local, remote, scheme, proto)
try:
h = [(k, v) for k, v in six.iteritems(h)]
response = request.run(method, path, qs, proto, h, fd)
finally:
if fd:
fd.close()
fd = None
if response.output_status.startswith(b"500"):
response_body = response.collapse_body()
if six.PY3:
response_body = response_body.decode(__salt_system_encoding__)
print(response_body)
raise AssertionError("Unexpected error")
# collapse the response into a bytestring
response.collapse_body()
return request, response
| true | true |
1c472bcd2028610b858c56c919d0ed52ec7eb5d7 | 4,370 | py | Python | Element3/read_and_run.py | AuckeBos/MLiPPaA | 4b6c563f93e1eb7fc90f66a9a6ada16c07664d71 | [
"MIT"
] | 1 | 2021-06-03T13:23:39.000Z | 2021-06-03T13:23:39.000Z | Element3/read_and_run.py | AuckeBos/MLiPPaA | 4b6c563f93e1eb7fc90f66a9a6ada16c07664d71 | [
"MIT"
] | null | null | null | Element3/read_and_run.py | AuckeBos/MLiPPaA | 4b6c563f93e1eb7fc90f66a9a6ada16c07664d71 | [
"MIT"
] | null | null | null | import argparse
import csv
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
import Element2.Evaluator
from Element2.BaseClassification import BaseClassifier
# As computed by the training data distribution (RebalanceTrainVal=False)
multi_train_prior = np.array([.5, .125, .125, .125, .125])
binary_train_prior = np.array([.5, .5])
multi_test_prior = np.array([.04, .02, .19, .51, .24])
binary_test_prior = np.array([.96, .04])
def read():
"""
Read command line arguments for the script:
- --data-file: The data file with the data to test. If not provided, use ExamData.csv in /data
- --classification-type: Classify binary or multiclass
- --model: Which type of model to use: The BinaryClassifier, MultiClassifier, or RecurrentClassifier
- --h5: The h5 file of the pretrained model, should match with --model
- --bayes: Apply bayes on the predictions
@return:
"""
parser = argparse.ArgumentParser(
description='Load a model, test them on a test dataset; save predictions to csv',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-d', '--data-file', type=str, required=False, default='../Element2/data/ExamData2.csv', help='The datafile containing the test data')
parser.add_argument('-t', '--classification-type', type=str, required=True, choices=['binary', 'multi'], help='Classification type: multi label or binary')
parser.add_argument('-m', '--model', type=str, required=True, choices=['binary', 'multi', 'recurrent'], help='Classification model: BinaryClassifier, MultiClassifier, or RecurrentClassifier')
parser.add_argument('-h5', '--h5', type=str, required=True, help='The h5 file of the saved model')
parser.add_argument('-b', '--bayes', type=str, required=True, choices=['True', 'False'], help='Apply bayes to the prediction outputs?')
args = parser.parse_args()
return args.data_file, args.classification_type, args.model, args.h5, bool(args.bayes)
def run(data_file: str, classification_type: str, model_type: str, h5: str, apply_bayes: bool):
"""
After commandline args have been read, run the model:
- Load the classifier
- Load the data
- Predict the data
- Generate csv in the desired format (predictions.csv)
@param data_file: The file that contains the testset
@param classification_type: The type of classification: binary or multi
@param model_type: The classifier type: binary, multi, recurrent
@param h5: The h5 file of the trained model
@param apply_bayes: Bool that indicates whether to apply bayes on the predictions
"""
classifier = Element2.Evaluator.Evaluator.parse_classifier_type(model_type)
classifier.apply_bayes = apply_bayes
if model_type == 'binary':
classifier.train_prior = binary_train_prior
classifier.test_prior = binary_test_prior
else: # Multi or recurrent
classifier.train_prior = multi_train_prior
classifier.test_prior = multi_test_prior
net = load_model(h5, custom_objects={'f1': BaseClassifier.f1, 'loss': classifier.loss()})
# Use manual label mapping for multi classifier:
predictions_to_labels = ['4top', 'ttbar', 'ttbarHiggs', 'ttbarW', 'ttbarZ']
# Define the number of objects per row. Needed because we need to have the exact same input shape as during training, otherwise
# The network won't be able to predict. Note that this does not decrease performance, since the network will mask them out
objects_per_row = 19
ids = pd.read_csv(data_file, delimiter=';', usecols=[0], names=['EventID'])['EventID'].tolist()
x, _ = classifier.load_data(data_file, False, objects_per_row)
predictions = classifier.predict(net, x)
with open('predictions.csv', 'w') as file:
writer = csv.writer(file, delimiter=',')
for (prediction, id) in zip(predictions, ids):
# Prefix with labels
if classification_type == 'binary': # prediction[0] must be the probability of 4-top
prediction = [f'4top={prediction[0]}']
else: # multi: prediction is array of probs
prediction = [f'{label}={value}' for (label, value) in zip(predictions_to_labels, prediction)]
writer.writerow([int(id)] + prediction)
if __name__ == '__main__':
run(*read())
| 50.813953 | 195 | 0.701373 | import argparse
import csv
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
import Element2.Evaluator
from Element2.BaseClassification import BaseClassifier
multi_train_prior = np.array([.5, .125, .125, .125, .125])
binary_train_prior = np.array([.5, .5])
multi_test_prior = np.array([.04, .02, .19, .51, .24])
binary_test_prior = np.array([.96, .04])
def read():
parser = argparse.ArgumentParser(
description='Load a model, test them on a test dataset; save predictions to csv',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-d', '--data-file', type=str, required=False, default='../Element2/data/ExamData2.csv', help='The datafile containing the test data')
parser.add_argument('-t', '--classification-type', type=str, required=True, choices=['binary', 'multi'], help='Classification type: multi label or binary')
parser.add_argument('-m', '--model', type=str, required=True, choices=['binary', 'multi', 'recurrent'], help='Classification model: BinaryClassifier, MultiClassifier, or RecurrentClassifier')
parser.add_argument('-h5', '--h5', type=str, required=True, help='The h5 file of the saved model')
parser.add_argument('-b', '--bayes', type=str, required=True, choices=['True', 'False'], help='Apply bayes to the prediction outputs?')
args = parser.parse_args()
return args.data_file, args.classification_type, args.model, args.h5, bool(args.bayes)
def run(data_file: str, classification_type: str, model_type: str, h5: str, apply_bayes: bool):
classifier = Element2.Evaluator.Evaluator.parse_classifier_type(model_type)
classifier.apply_bayes = apply_bayes
if model_type == 'binary':
classifier.train_prior = binary_train_prior
classifier.test_prior = binary_test_prior
else:
classifier.train_prior = multi_train_prior
classifier.test_prior = multi_test_prior
net = load_model(h5, custom_objects={'f1': BaseClassifier.f1, 'loss': classifier.loss()})
predictions_to_labels = ['4top', 'ttbar', 'ttbarHiggs', 'ttbarW', 'ttbarZ']
objects_per_row = 19
ids = pd.read_csv(data_file, delimiter=';', usecols=[0], names=['EventID'])['EventID'].tolist()
x, _ = classifier.load_data(data_file, False, objects_per_row)
predictions = classifier.predict(net, x)
with open('predictions.csv', 'w') as file:
writer = csv.writer(file, delimiter=',')
for (prediction, id) in zip(predictions, ids):
# Prefix with labels
if classification_type == 'binary': # prediction[0] must be the probability of 4-top
prediction = [f'4top={prediction[0]}']
else: # multi: prediction is array of probs
prediction = [f'{label}={value}' for (label, value) in zip(predictions_to_labels, prediction)]
writer.writerow([int(id)] + prediction)
if __name__ == '__main__':
run(*read())
| true | true |
1c472de12a097cc05b9c30c9abaaff2a7aa12279 | 376 | py | Python | product/urls.py | benilyxdd/shop-web-django | 93a9549faad2703118eb2b34be3053408e7297fa | [
"MIT"
] | null | null | null | product/urls.py | benilyxdd/shop-web-django | 93a9549faad2703118eb2b34be3053408e7297fa | [
"MIT"
] | null | null | null | product/urls.py | benilyxdd/shop-web-django | 93a9549faad2703118eb2b34be3053408e7297fa | [
"MIT"
] | null | null | null | from django.urls import path
from .views import api_products, modify_products_gui, create_product, change_product, delete_product
# URL route table for the product app.
urlpatterns = [
    path('api/', api_products),                         # function-based view (JSON API)
    path('create/', create_product.as_view()),          # class-based views below
    path('modify/<int:id>', change_product.as_view()),  # <int:id> captures the product pk
    path('delete/<int:id>', delete_product.as_view()),
    path('gui/', modify_products_gui)
]
| 34.181818 | 100 | 0.720745 | from django.urls import path
from .views import api_products, modify_products_gui, create_product, change_product, delete_product
urlpatterns = [
path('api/', api_products),
path('create/', create_product.as_view()),
path('modify/<int:id>', change_product.as_view()),
path('delete/<int:id>', delete_product.as_view()),
path('gui/', modify_products_gui)
]
| true | true |
1c472e6ac9ef7ae408c43df4b67c727e1444cf81 | 2,158 | py | Python | swap_user/otp/sites.py | artinnok/django-swap-user | f2c02b9fc5829651a6dab9c6d053dfe2425e2266 | [
"MIT"
] | null | null | null | swap_user/otp/sites.py | artinnok/django-swap-user | f2c02b9fc5829651a6dab9c6d053dfe2425e2266 | [
"MIT"
] | null | null | null | swap_user/otp/sites.py | artinnok/django-swap-user | f2c02b9fc5829651a6dab9c6d053dfe2425e2266 | [
"MIT"
] | null | null | null | from typing import Optional
from django.contrib import admin
from django.http import HttpRequest
from django.urls import path
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from swap_user.otp.views import CheckOTPView, GetOTPView
class OTPUserSite(admin.AdminSite):
    """Admin site variant that replaces password login with a two-step OTP flow."""

    def get_urls(self):
        """Return admin urls with the extra /check-otp/ route prepended."""
        extra = [
            path("check-otp/", self.check_otp, name="check-otp"),
        ]
        # Order matters: custom routes must be matched before the defaults.
        return extra + super().get_urls()

    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def login(self, request: HttpRequest, extra_context: Optional[dict] = None):
        """Serve GetOTPView, which sends an OTP to the user via the configured sender."""
        request.current_app = self.name
        view_kwargs = self._get_context(request, extra_context)
        return GetOTPView.as_view(**view_kwargs)(request)

    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def check_otp(self, request: HttpRequest, extra_context: Optional[dict] = None):
        """Serve CheckOTPView, which compares the submitted OTP with the cached one."""
        request.current_app = self.name
        view_kwargs = self._get_context(request, extra_context)
        return CheckOTPView.as_view(**view_kwargs)(request)

    def _get_context(self, request, extra_context: dict) -> dict:
        """Build the kwargs for as_view() (ref - django.contrib.admin.sites#login)."""
        merged = {**self.each_context(request), **(extra_context or {})}
        return {"extra_context": merged}
| 29.561644 | 84 | 0.663114 | from typing import Optional
from django.contrib import admin
from django.http import HttpRequest
from django.urls import path
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from swap_user.otp.views import CheckOTPView, GetOTPView
class OTPUserSite(admin.AdminSite):
def get_urls(self):
default_urls = super().get_urls()
custom_urls = [
path("check-otp/", self.check_otp, name="check-otp"),
]
urls = custom_urls + default_urls
return urls
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def login(self, request: HttpRequest, extra_context: Optional[dict] = None):
request.current_app = self.name
context = self._get_context(request, extra_context)
return GetOTPView.as_view(**context)(request)
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def check_otp(self, request: HttpRequest, extra_context: Optional[dict] = None):
request.current_app = self.name
context = self._get_context(request, extra_context)
return CheckOTPView.as_view(**context)(request)
def _get_context(self, request, extra_context: dict) -> dict:
context = {
**self.each_context(request),
**(extra_context or {}),
}
defaults = {
"extra_context": context,
}
return defaults
| true | true |
1c472f3dda2439d6bf85f18ef27843b91bd17e74 | 3,914 | py | Python | manager/master/msgCell.py | Tootooroo/VerManager | 65a37ed4f864c8d6adeade52582315aeff901fbe | [
"MIT"
] | 2 | 2020-03-20T20:04:54.000Z | 2021-03-18T12:03:54.000Z | manager/master/msgCell.py | Tootooroo/VerManager | 65a37ed4f864c8d6adeade52582315aeff901fbe | [
"MIT"
] | null | null | null | manager/master/msgCell.py | Tootooroo/VerManager | 65a37ed4f864c8d6adeade52582315aeff901fbe | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Gcom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
import asyncio
import typing as T
from client.messages import Message
from manager.master.exceptions import UNABLE_SEND_MSG_TO_PROXY, \
MSG_WRAPPER_CFG_NOT_EXISTS
import manager.master.proxy_configs as ProxyCfg
class MsgWrapper:
    """Wraps a Message together with per-message control configs for the Proxy."""

    ON = 'ON'
    OFF = 'OFF'

    def __init__(self, msg: Message) -> None:
        self.msg = msg
        self.config_map = {}  # type: T.Dict[str, str]

    def get_msg(self) -> Message:
        """Return the wrapped message."""
        return self.msg

    def add_config(self, cfg_key: str, cfg_val: str) -> None:
        """
        Add config to config_map.
        """
        self.config_map[cfg_key] = cfg_val

    def get_config(self, config_key: str) -> T.Optional[str]:
        """Return the config value for `config_key`, or None if absent."""
        # dict.get does the membership test and lookup in one step.
        return self.config_map.get(config_key)
class MsgSource(abc.ABC):
    """Base class for message producers that can push real-time messages to Proxy."""

    def __init__(self, src_id: str) -> None:
        self.src_id = src_id

        # Queue used by real_time_msg to hand messages to Proxy;
        # assigned by Proxy via setQ() when this source is registered.
        self._q = None  # type: T.Optional[asyncio.Queue]

    def setQ(self, q: asyncio.Queue) -> None:
        self._q = q

    def real_time_broadcast(self, msg: Message, configs: T.Dict[str, str]) -> None:
        """Force the broadcast flag on, then send as a real-time message."""
        if configs.get("is_broadcast") != "ON":
            configs["is_broadcast"] = "ON"

        self.real_time_msg(msg, configs)

    def real_time_msg(self, msg: Message, configs: T.Dict[str, str]) -> None:
        """
        Wrap a message into a MsgWrapper with control info then
        send the MsgWrapper to Proxy
        """
        # Copy every config entry onto the wrapper.
        wrapper = MsgWrapper(msg)
        for cfg_key, cfg_val in configs.items():
            wrapper.add_config(cfg_key, cfg_val)

        try:
            if self._q is not None:
                self._q.put_nowait(wrapper)
        except asyncio.QueueFull:
            raise UNABLE_SEND_MSG_TO_PROXY(
                "Proxy's message queue is full"
            )

    def real_time_msg_available(self) -> bool:
        """True when a queue is attached and has room for another message."""
        if self._q is None:
            return False
        return not self._q.full()

    @abc.abstractmethod
    async def gen_msg(self, args: T.List[str] = None) -> T.Optional[Message]:
        """
        Generate Message

        Require: noblocked, noexcept
        """
class MsgUnit:
    """Bundles a message type string with its MsgSource and per-unit config."""

    def __init__(self, msg_type: str, source: MsgSource,
                 config: T.Dict[str, str]) -> None:
        self._type = msg_type
        self._source = source
        self._config = config

    def src_id(self) -> str:
        """Identifier of the underlying source."""
        return self._source.src_id

    def msg_type(self) -> str:
        """Type tag of the messages this unit produces."""
        return self._type

    def config(self) -> T.Dict[str, str]:
        """Per-unit configuration passed in at construction."""
        return self._config

    async def gen_msg(self, args: T.List[str] = None) -> T.Optional[Message]:
        """Delegate message generation to the wrapped source."""
        return await self._source.gen_msg(args)
| 31.312 | 83 | 0.653807 |
import abc
import asyncio
import typing as T
from client.messages import Message
from manager.master.exceptions import UNABLE_SEND_MSG_TO_PROXY, \
MSG_WRAPPER_CFG_NOT_EXISTS
import manager.master.proxy_configs as ProxyCfg
class MsgWrapper:
ON = 'ON'
OFF = 'OFF'
def __init__(self, msg: Message) -> None:
self.msg = msg
self.config_map = {}
def get_msg(self) -> Message:
return self.msg
def add_config(self, cfg_key: str, cfg_val: str) -> None:
self.config_map[cfg_key] = cfg_val
def get_config(self, config_key: str) -> T.Optional[str]:
if config_key not in self.config_map:
return None
return self.config_map[config_key]
class MsgSource(abc.ABC):
def __init__(self, src_id: str) -> None:
self.src_id = src_id
self._q = None
def setQ(self, q: asyncio.Queue) -> None:
self._q = q
def real_time_broadcast(self, msg: Message, configs: T.Dict[str, str]) -> None:
if "is_broadcast" not in configs or \
configs["is_broadcast"] != "ON":
configs["is_broadcast"] = "ON"
self.real_time_msg(msg, configs)
def real_time_msg(self, msg: Message, configs: T.Dict[str, str]) -> None:
wrapper = MsgWrapper(msg)
for key in configs:
wrapper.add_config(key, configs[key])
try:
if self._q is not None:
self._q.put_nowait(wrapper)
except asyncio.QueueFull:
raise UNABLE_SEND_MSG_TO_PROXY(
"Proxy's message queue is full"
)
def real_time_msg_available(self) -> bool:
return self._q is not None and not self._q.full()
@abc.abstractmethod
async def gen_msg(self, args: T.List[str] = None) -> T.Optional[Message]:
class MsgUnit:
def __init__(self, msg_type: str, source: MsgSource,
config: T.Dict[str, str]) -> None:
self._type = msg_type
self._source = source
self._config = config
def src_id(self) -> str:
return self._source.src_id
def msg_type(self) -> str:
return self._type
def config(self) -> T.Dict[str, str]:
return self._config
async def gen_msg(self, args: T.List[str] = None) -> T.Optional[Message]:
return await self._source.gen_msg(args)
| true | true |
1c473066912309ccccaf9739adb7a434c66c2b09 | 5,228 | py | Python | tests/unittests/test_rpc_messages.py | gohar94/azure-functions-python-worker | 4322e53ddbcc1eea40c1b061b42653336d9003f6 | [
"MIT"
] | null | null | null | tests/unittests/test_rpc_messages.py | gohar94/azure-functions-python-worker | 4322e53ddbcc1eea40c1b061b42653336d9003f6 | [
"MIT"
] | null | null | null | tests/unittests/test_rpc_messages.py | gohar94/azure-functions-python-worker | 4322e53ddbcc1eea40c1b061b42653336d9003f6 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import subprocess
import sys
import tempfile
import typing
import unittest
from azure_functions_worker import protos
from azure_functions_worker import testutils
class TestGRPC(testutils.AsyncTestCase):
    """gRPC-level tests for environment reload and worker import behaviour."""

    # Snapshot of the environment/cwd taken at class creation, before any
    # test mutates them; used by _reset_environ for cleanup.
    pre_test_env = os.environ.copy()
    pre_test_cwd = os.getcwd()

    def _reset_environ(self):
        # Restore pre-existing variables and the working directory.
        # NOTE(review): variables *added* during a test are not removed here,
        # only the captured ones are restored — confirm this is intended.
        for key, value in self.pre_test_env.items():
            os.environ[key] = value
        os.chdir(self.pre_test_cwd)

    async def _verify_environment_reloaded(
            self,
            test_env: typing.Dict[str, str] = {},
            test_cwd: str = os.getcwd()):
        """Send a FunctionEnvironmentReloadRequest and assert env/cwd were applied."""
        request = protos.FunctionEnvironmentReloadRequest(
            environment_variables=test_env,
            function_app_directory=test_cwd)

        request_msg = protos.StreamingMessage(
            request_id='0',
            function_environment_reload_request=request)

        disp = testutils.create_dummy_dispatcher()

        try:
            r = await disp._handle__function_environment_reload_request(
                request_msg)

            # After reload the process environment must equal exactly test_env.
            environ_dict = os.environ.copy()
            self.assertDictEqual(environ_dict, test_env)
            self.assertEqual(os.getcwd(), test_cwd)
            status = r.function_environment_reload_response.result.status
            self.assertEqual(status, protos.StatusResult.Success)
        finally:
            self._reset_environ()

    async def test_multiple_env_vars_load(self):
        test_env = {'TEST_KEY': 'foo', 'HELLO': 'world'}
        await self._verify_environment_reloaded(test_env=test_env)

    async def test_empty_env_vars_load(self):
        test_env = {}
        await self._verify_environment_reloaded(test_env=test_env)

    @unittest.skipIf(sys.platform == 'darwin',
                     'MacOS creates the processes specific var folder in '
                     '/private filesystem and not in /var like in linux '
                     'systems.')
    async def test_changing_current_working_directory(self):
        test_cwd = tempfile.gettempdir()
        await self._verify_environment_reloaded(test_cwd=test_cwd)

    @unittest.skipIf(sys.platform == 'darwin',
                     'MacOS creates the processes specific var folder in '
                     '/private filesystem and not in /var like in linux '
                     'systems.')
    async def test_reload_env_message(self):
        test_env = {'TEST_KEY': 'foo', 'HELLO': 'world'}
        test_cwd = tempfile.gettempdir()
        await self._verify_environment_reloaded(test_env, test_cwd)

    def _verify_sys_path_import(self, result, expected_output):
        """Run the path-import shell script and check `expected_output` appears."""
        path_import_script = os.path.join(testutils.UNIT_TESTS_ROOT,
                                          'path_import', 'test_path_import.sh')
        try:
            # Script must be executable before we can invoke it.
            subprocess.run(['chmod +x ' + path_import_script], shell=True)

            exported_path = ":".join(sys.path)
            output = subprocess.check_output(
                [path_import_script, result, exported_path],
                stderr=subprocess.STDOUT)
            decoded_output = output.decode(sys.stdout.encoding).strip()
            self.assertTrue(expected_output in decoded_output)
        finally:
            # Revert the permission change and the environment.
            subprocess.run(['chmod -x ' + path_import_script], shell=True)
            self._reset_environ()

    @unittest.skipIf(sys.platform == 'win32',
                     'Linux .sh script only works on Linux')
    def test_failed_sys_path_import(self):
        self._verify_sys_path_import(
            'fail',
            "No module named 'test_module'")

    @unittest.skipIf(sys.platform == 'win32',
                     'Linux .sh script only works on Linux')
    def test_successful_sys_path_import(self):
        self._verify_sys_path_import(
            'success',
            'This module was imported!')

    def _verify_azure_namespace_import(self, result, expected_output):
        """Run the azure-namespace-import script and check `expected_output` appears."""
        print(os.getcwd())
        path_import_script = os.path.join(testutils.UNIT_TESTS_ROOT,
                                          'azure_namespace_import',
                                          'test_azure_namespace_import.sh')
        try:
            subprocess.run(['chmod +x ' + path_import_script], shell=True)

            output = subprocess.check_output(
                [path_import_script, result],
                stderr=subprocess.STDOUT)
            decoded_output = output.decode(sys.stdout.encoding).strip()
            self.assertTrue(expected_output in decoded_output)
        finally:
            subprocess.run(['chmod -x ' + path_import_script], shell=True)
            self._reset_environ()

    @unittest.skipIf(sys.platform == 'win32',
                     'Linux .sh script only works on Linux')
    def test_failed_azure_namespace_import(self):
        self._verify_azure_namespace_import(
            'false',
            'module_b fails to import')

    @unittest.skipIf(sys.platform == 'win32',
                     'Linux .sh script only works on Linux')
    def test_successful_azure_namespace_import(self):
        self._verify_azure_namespace_import(
            'true',
            'module_b is imported')
| 39.014925 | 79 | 0.624713 |
import os
import subprocess
import sys
import tempfile
import typing
import unittest
from azure_functions_worker import protos
from azure_functions_worker import testutils
class TestGRPC(testutils.AsyncTestCase):
pre_test_env = os.environ.copy()
pre_test_cwd = os.getcwd()
def _reset_environ(self):
for key, value in self.pre_test_env.items():
os.environ[key] = value
os.chdir(self.pre_test_cwd)
async def _verify_environment_reloaded(
self,
test_env: typing.Dict[str, str] = {},
test_cwd: str = os.getcwd()):
request = protos.FunctionEnvironmentReloadRequest(
environment_variables=test_env,
function_app_directory=test_cwd)
request_msg = protos.StreamingMessage(
request_id='0',
function_environment_reload_request=request)
disp = testutils.create_dummy_dispatcher()
try:
r = await disp._handle__function_environment_reload_request(
request_msg)
environ_dict = os.environ.copy()
self.assertDictEqual(environ_dict, test_env)
self.assertEqual(os.getcwd(), test_cwd)
status = r.function_environment_reload_response.result.status
self.assertEqual(status, protos.StatusResult.Success)
finally:
self._reset_environ()
async def test_multiple_env_vars_load(self):
test_env = {'TEST_KEY': 'foo', 'HELLO': 'world'}
await self._verify_environment_reloaded(test_env=test_env)
async def test_empty_env_vars_load(self):
test_env = {}
await self._verify_environment_reloaded(test_env=test_env)
@unittest.skipIf(sys.platform == 'darwin',
'MacOS creates the processes specific var folder in '
'/private filesystem and not in /var like in linux '
'systems.')
async def test_changing_current_working_directory(self):
test_cwd = tempfile.gettempdir()
await self._verify_environment_reloaded(test_cwd=test_cwd)
@unittest.skipIf(sys.platform == 'darwin',
'MacOS creates the processes specific var folder in '
'/private filesystem and not in /var like in linux '
'systems.')
async def test_reload_env_message(self):
test_env = {'TEST_KEY': 'foo', 'HELLO': 'world'}
test_cwd = tempfile.gettempdir()
await self._verify_environment_reloaded(test_env, test_cwd)
def _verify_sys_path_import(self, result, expected_output):
path_import_script = os.path.join(testutils.UNIT_TESTS_ROOT,
'path_import', 'test_path_import.sh')
try:
subprocess.run(['chmod +x ' + path_import_script], shell=True)
exported_path = ":".join(sys.path)
output = subprocess.check_output(
[path_import_script, result, exported_path],
stderr=subprocess.STDOUT)
decoded_output = output.decode(sys.stdout.encoding).strip()
self.assertTrue(expected_output in decoded_output)
finally:
subprocess.run(['chmod -x ' + path_import_script], shell=True)
self._reset_environ()
@unittest.skipIf(sys.platform == 'win32',
'Linux .sh script only works on Linux')
def test_failed_sys_path_import(self):
self._verify_sys_path_import(
'fail',
"No module named 'test_module'")
@unittest.skipIf(sys.platform == 'win32',
'Linux .sh script only works on Linux')
def test_successful_sys_path_import(self):
self._verify_sys_path_import(
'success',
'This module was imported!')
def _verify_azure_namespace_import(self, result, expected_output):
print(os.getcwd())
path_import_script = os.path.join(testutils.UNIT_TESTS_ROOT,
'azure_namespace_import',
'test_azure_namespace_import.sh')
try:
subprocess.run(['chmod +x ' + path_import_script], shell=True)
output = subprocess.check_output(
[path_import_script, result],
stderr=subprocess.STDOUT)
decoded_output = output.decode(sys.stdout.encoding).strip()
self.assertTrue(expected_output in decoded_output)
finally:
subprocess.run(['chmod -x ' + path_import_script], shell=True)
self._reset_environ()
@unittest.skipIf(sys.platform == 'win32',
'Linux .sh script only works on Linux')
def test_failed_azure_namespace_import(self):
self._verify_azure_namespace_import(
'false',
'module_b fails to import')
@unittest.skipIf(sys.platform == 'win32',
'Linux .sh script only works on Linux')
def test_successful_azure_namespace_import(self):
self._verify_azure_namespace_import(
'true',
'module_b is imported')
| true | true |
1c47307f5960a397db57f17a221cc66eb469d0a0 | 9,974 | py | Python | tests/test_commands.py | LevitatingBusinessMan/mopidy-mpd | a8c4debc139020fbb17066b66a746644a915296c | [
"Apache-2.0"
] | 68 | 2019-12-24T22:09:05.000Z | 2022-03-06T03:56:39.000Z | tests/test_commands.py | LevitatingBusinessMan/mopidy-mpd | a8c4debc139020fbb17066b66a746644a915296c | [
"Apache-2.0"
] | 53 | 2019-12-20T23:11:11.000Z | 2022-01-30T11:20:41.000Z | tests/test_commands.py | LevitatingBusinessMan/mopidy-mpd | a8c4debc139020fbb17066b66a746644a915296c | [
"Apache-2.0"
] | 21 | 2019-12-20T23:06:20.000Z | 2022-01-20T05:43:35.000Z | import unittest
from mopidy_mpd import exceptions, protocol
class TestConverts(unittest.TestCase):
    """Tests for the MPD protocol argument converters (INT, UINT, BOOL, RANGE)."""

    def test_integer(self):
        assert protocol.INT("123") == 123
        assert protocol.INT("-123") == -123
        assert protocol.INT("+123") == 123
        for bad in ("3.14", "", "abc", "12 34"):
            self.assertRaises(ValueError, protocol.INT, bad)

    def test_unsigned_integer(self):
        assert protocol.UINT("123") == 123
        # UINT rejects any sign prefix as well as non-numeric input.
        for bad in ("-123", "+123", "3.14", "", "abc", "12 34"):
            self.assertRaises(ValueError, protocol.UINT, bad)

    def test_boolean(self):
        assert protocol.BOOL("1") is True
        assert protocol.BOOL("0") is False
        # Only the literal "1"/"0" are accepted — not words or numbers.
        for bad in ("3.14", "", "true", "false", "abc", "12 34"):
            self.assertRaises(ValueError, protocol.BOOL, bad)

    def test_range(self):
        assert protocol.RANGE("1") == slice(1, 2)
        assert protocol.RANGE("0") == slice(0, 1)
        assert protocol.RANGE("0:") == slice(0, None)
        assert protocol.RANGE("1:3") == slice(1, 3)
        for bad in ("3.14", "1:abc", "abc:1", "2:1", "-1:2", "1 : 2",
                    "", "true", "false", "abc", "12 34"):
            self.assertRaises(ValueError, protocol.RANGE, bad)
class TestCommands(unittest.TestCase):
    """Tests for protocol.Commands: handler registration, dispatch and validation."""

    def setUp(self):  # noqa: N802
        self.commands = protocol.Commands()

    def test_add_as_a_decorator(self):
        @self.commands.add("test")
        def test(context):
            pass

    def test_register_second_command_to_same_name_fails(self):
        def func(context):
            pass

        self.commands.add("foo")(func)
        with self.assertRaises(ValueError):
            self.commands.add("foo")(func)

    def test_function_only_takes_context_succeeds(self):
        sentinel = object()
        self.commands.add("bar")(lambda context: sentinel)
        assert sentinel == self.commands.call(["bar"])

    def test_function_has_required_arg_succeeds(self):
        sentinel = object()
        self.commands.add("bar")(lambda context, required: sentinel)
        assert sentinel == self.commands.call(["bar", "arg"])

    def test_function_has_optional_args_succeeds(self):
        sentinel = object()
        self.commands.add("bar")(lambda context, optional=None: sentinel)
        assert sentinel == self.commands.call(["bar"])
        assert sentinel == self.commands.call(["bar", "arg"])

    def test_function_has_required_and_optional_args_succeeds(self):
        sentinel = object()

        def func(context, required, optional=None):
            return sentinel

        self.commands.add("bar")(func)
        assert sentinel == self.commands.call(["bar", "arg"])
        assert sentinel == self.commands.call(["bar", "arg", "arg"])

    def test_function_has_varargs_succeeds(self):
        # Call with 0..9 extra arguments; all must dispatch successfully.
        sentinel, args = object(), []
        self.commands.add("bar")(lambda context, *args: sentinel)
        for _ in range(10):
            assert sentinel == self.commands.call((["bar"] + args))
            args.append("test")

    def test_function_has_only_varags_succeeds(self):
        # NOTE(review): "varags" is a typo for "varargs" in the method name.
        sentinel = object()
        self.commands.add("baz")(lambda *args: sentinel)
        assert sentinel == self.commands.call(["baz"])

    def test_function_has_no_arguments_fails(self):
        with self.assertRaises(TypeError):
            self.commands.add("test")(lambda: True)

    def test_function_has_required_and_varargs_fails(self):
        with self.assertRaises(TypeError):

            def func(context, required, *args):
                pass

            self.commands.add("test")(func)

    def test_function_has_optional_and_varargs_fails(self):
        with self.assertRaises(TypeError):

            def func(context, optional=None, *args):
                pass

            self.commands.add("test")(func)

    def test_function_hash_keywordargs_fails(self):
        # NOTE(review): "hash" is a typo for "has" in the method name.
        with self.assertRaises(TypeError):
            self.commands.add("test")(lambda context, **kwargs: True)

    def test_call_chooses_correct_handler(self):
        sentinel1, sentinel2, sentinel3 = object(), object(), object()

        self.commands.add("foo")(lambda context: sentinel1)
        self.commands.add("bar")(lambda context: sentinel2)
        self.commands.add("baz")(lambda context: sentinel3)

        assert sentinel1 == self.commands.call(["foo"])
        assert sentinel2 == self.commands.call(["bar"])
        assert sentinel3 == self.commands.call(["baz"])

    def test_call_with_nonexistent_handler(self):
        with self.assertRaises(exceptions.MpdUnknownCommand):
            self.commands.call(["bar"])

    def test_call_passes_context(self):
        sentinel = object()
        self.commands.add("foo")(lambda context: context)
        assert sentinel == self.commands.call(["foo"], context=sentinel)

    def test_call_without_args_fails(self):
        with self.assertRaises(exceptions.MpdNoCommand):
            self.commands.call([])

    def test_call_passes_required_argument(self):
        self.commands.add("foo")(lambda context, required: required)
        assert "test123" == self.commands.call(["foo", "test123"])

    def test_call_passes_optional_argument(self):
        sentinel = object()
        self.commands.add("foo")(lambda context, optional=sentinel: optional)
        assert sentinel == self.commands.call(["foo"])
        assert "test" == self.commands.call(["foo", "test"])

    def test_call_passes_required_and_optional_argument(self):
        def func(context, required, optional=None):
            return (required, optional)

        self.commands.add("foo")(func)
        assert ("arg", None) == self.commands.call(["foo", "arg"])
        assert ("arg", "kwarg") == self.commands.call(["foo", "arg", "kwarg"])

    def test_call_passes_varargs(self):
        # NOTE(review): this test registers a handler but never calls it nor
        # asserts anything — it only verifies registration does not raise.
        self.commands.add("foo")(lambda context, *args: args)

    def test_call_incorrect_args(self):
        # Too many args for a context-only handler.
        self.commands.add("foo")(lambda context: context)
        with self.assertRaises(exceptions.MpdArgError):
            self.commands.call(["foo", "bar"])

        # Too many args for a single-required-arg handler.
        self.commands.add("bar")(lambda context, required: context)
        with self.assertRaises(exceptions.MpdArgError):
            self.commands.call(["bar", "bar", "baz"])

        # Too many args for a single-optional-arg handler.
        self.commands.add("baz")(lambda context, optional=None: context)
        with self.assertRaises(exceptions.MpdArgError):
            self.commands.call(["baz", "bar", "baz"])

    def test_validator_gets_applied_to_required_arg(self):
        sentinel = object()

        def func(context, required):
            return required

        self.commands.add("test", required=lambda v: sentinel)(func)
        assert sentinel == self.commands.call(["test", "foo"])

    def test_validator_gets_applied_to_optional_arg(self):
        sentinel = object()

        def func(context, optional=None):
            return optional

        self.commands.add("foo", optional=lambda v: sentinel)(func)
        assert sentinel == self.commands.call(["foo", "123"])

    def test_validator_skips_optional_default(self):
        # The validator must not run when the optional arg is not supplied.
        sentinel = object()

        def func(context, optional=sentinel):
            return optional

        self.commands.add("foo", optional=lambda v: None)(func)
        assert sentinel == self.commands.call(["foo"])

    def test_validator_applied_to_non_existent_arg_fails(self):
        self.commands.add("foo")(lambda context, arg: arg)
        with self.assertRaises(TypeError):

            def func(context, wrong_arg):
                return wrong_arg

            self.commands.add("bar", arg=lambda v: v)(func)

    def test_validator_called_context_fails(self):
        # NOTE(review): everything below the early return is dead code,
        # kept pending the TODO's resolution.
        return  # TODO: how to handle this
        with self.assertRaises(TypeError):

            def func(context):
                pass

            self.commands.add("bar", context=lambda v: v)(func)

    def test_validator_value_error_is_converted(self):
        def validdate(value):
            raise ValueError

        def func(context, arg):
            pass

        self.commands.add("bar", arg=validdate)(func)

        with self.assertRaises(exceptions.MpdArgError):
            self.commands.call(["bar", "test"])

    def test_auth_required_gets_stored(self):
        def func1(context):
            pass

        def func2(context):
            pass

        self.commands.add("foo")(func1)
        self.commands.add("bar", auth_required=False)(func2)

        assert self.commands.handlers["foo"].auth_required
        assert not self.commands.handlers["bar"].auth_required

    def test_list_command_gets_stored(self):
        def func1(context):
            pass

        def func2(context):
            pass

        self.commands.add("foo")(func1)
        self.commands.add("bar", list_command=False)(func2)

        assert self.commands.handlers["foo"].list_command
        assert not self.commands.handlers["bar"].list_command
| 36.534799 | 78 | 0.641267 | import unittest
from mopidy_mpd import exceptions, protocol
class TestConverts(unittest.TestCase):
def test_integer(self):
assert 123 == protocol.INT("123")
assert (-123) == protocol.INT("-123")
assert 123 == protocol.INT("+123")
self.assertRaises(ValueError, protocol.INT, "3.14")
self.assertRaises(ValueError, protocol.INT, "")
self.assertRaises(ValueError, protocol.INT, "abc")
self.assertRaises(ValueError, protocol.INT, "12 34")
def test_unsigned_integer(self):
assert 123 == protocol.UINT("123")
self.assertRaises(ValueError, protocol.UINT, "-123")
self.assertRaises(ValueError, protocol.UINT, "+123")
self.assertRaises(ValueError, protocol.UINT, "3.14")
self.assertRaises(ValueError, protocol.UINT, "")
self.assertRaises(ValueError, protocol.UINT, "abc")
self.assertRaises(ValueError, protocol.UINT, "12 34")
def test_boolean(self):
assert protocol.BOOL("1") is True
assert protocol.BOOL("0") is False
self.assertRaises(ValueError, protocol.BOOL, "3.14")
self.assertRaises(ValueError, protocol.BOOL, "")
self.assertRaises(ValueError, protocol.BOOL, "true")
self.assertRaises(ValueError, protocol.BOOL, "false")
self.assertRaises(ValueError, protocol.BOOL, "abc")
self.assertRaises(ValueError, protocol.BOOL, "12 34")
def test_range(self):
assert slice(1, 2) == protocol.RANGE("1")
assert slice(0, 1) == protocol.RANGE("0")
assert slice(0, None) == protocol.RANGE("0:")
assert slice(1, 3) == protocol.RANGE("1:3")
self.assertRaises(ValueError, protocol.RANGE, "3.14")
self.assertRaises(ValueError, protocol.RANGE, "1:abc")
self.assertRaises(ValueError, protocol.RANGE, "abc:1")
self.assertRaises(ValueError, protocol.RANGE, "2:1")
self.assertRaises(ValueError, protocol.RANGE, "-1:2")
self.assertRaises(ValueError, protocol.RANGE, "1 : 2")
self.assertRaises(ValueError, protocol.RANGE, "")
self.assertRaises(ValueError, protocol.RANGE, "true")
self.assertRaises(ValueError, protocol.RANGE, "false")
self.assertRaises(ValueError, protocol.RANGE, "abc")
self.assertRaises(ValueError, protocol.RANGE, "12 34")
class TestCommands(unittest.TestCase):
def setUp(self):
self.commands = protocol.Commands()
def test_add_as_a_decorator(self):
@self.commands.add("test")
def test(context):
pass
def test_register_second_command_to_same_name_fails(self):
def func(context):
pass
self.commands.add("foo")(func)
with self.assertRaises(ValueError):
self.commands.add("foo")(func)
def test_function_only_takes_context_succeeds(self):
sentinel = object()
self.commands.add("bar")(lambda context: sentinel)
assert sentinel == self.commands.call(["bar"])
def test_function_has_required_arg_succeeds(self):
sentinel = object()
self.commands.add("bar")(lambda context, required: sentinel)
assert sentinel == self.commands.call(["bar", "arg"])
def test_function_has_optional_args_succeeds(self):
sentinel = object()
self.commands.add("bar")(lambda context, optional=None: sentinel)
assert sentinel == self.commands.call(["bar"])
assert sentinel == self.commands.call(["bar", "arg"])
def test_function_has_required_and_optional_args_succeeds(self):
sentinel = object()
def func(context, required, optional=None):
return sentinel
self.commands.add("bar")(func)
assert sentinel == self.commands.call(["bar", "arg"])
assert sentinel == self.commands.call(["bar", "arg", "arg"])
def test_function_has_varargs_succeeds(self):
sentinel, args = object(), []
self.commands.add("bar")(lambda context, *args: sentinel)
for _ in range(10):
assert sentinel == self.commands.call((["bar"] + args))
args.append("test")
def test_function_has_only_varags_succeeds(self):
sentinel = object()
self.commands.add("baz")(lambda *args: sentinel)
assert sentinel == self.commands.call(["baz"])
def test_function_has_no_arguments_fails(self):
with self.assertRaises(TypeError):
self.commands.add("test")(lambda: True)
def test_function_has_required_and_varargs_fails(self):
with self.assertRaises(TypeError):
def func(context, required, *args):
pass
self.commands.add("test")(func)
def test_function_has_optional_and_varargs_fails(self):
with self.assertRaises(TypeError):
def func(context, optional=None, *args):
pass
self.commands.add("test")(func)
def test_function_hash_keywordargs_fails(self):
with self.assertRaises(TypeError):
self.commands.add("test")(lambda context, **kwargs: True)
def test_call_chooses_correct_handler(self):
sentinel1, sentinel2, sentinel3 = object(), object(), object()
self.commands.add("foo")(lambda context: sentinel1)
self.commands.add("bar")(lambda context: sentinel2)
self.commands.add("baz")(lambda context: sentinel3)
assert sentinel1 == self.commands.call(["foo"])
assert sentinel2 == self.commands.call(["bar"])
assert sentinel3 == self.commands.call(["baz"])
def test_call_with_nonexistent_handler(self):
with self.assertRaises(exceptions.MpdUnknownCommand):
self.commands.call(["bar"])
def test_call_passes_context(self):
sentinel = object()
self.commands.add("foo")(lambda context: context)
assert sentinel == self.commands.call(["foo"], context=sentinel)
def test_call_without_args_fails(self):
with self.assertRaises(exceptions.MpdNoCommand):
self.commands.call([])
def test_call_passes_required_argument(self):
self.commands.add("foo")(lambda context, required: required)
assert "test123" == self.commands.call(["foo", "test123"])
def test_call_passes_optional_argument(self):
sentinel = object()
self.commands.add("foo")(lambda context, optional=sentinel: optional)
assert sentinel == self.commands.call(["foo"])
assert "test" == self.commands.call(["foo", "test"])
def test_call_passes_required_and_optional_argument(self):
def func(context, required, optional=None):
return (required, optional)
self.commands.add("foo")(func)
assert ("arg", None) == self.commands.call(["foo", "arg"])
assert ("arg", "kwarg") == self.commands.call(["foo", "arg", "kwarg"])
def test_call_passes_varargs(self):
self.commands.add("foo")(lambda context, *args: args)
def test_call_incorrect_args(self):
self.commands.add("foo")(lambda context: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(["foo", "bar"])
self.commands.add("bar")(lambda context, required: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(["bar", "bar", "baz"])
self.commands.add("baz")(lambda context, optional=None: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(["baz", "bar", "baz"])
def test_validator_gets_applied_to_required_arg(self):
sentinel = object()
def func(context, required):
return required
self.commands.add("test", required=lambda v: sentinel)(func)
assert sentinel == self.commands.call(["test", "foo"])
def test_validator_gets_applied_to_optional_arg(self):
sentinel = object()
def func(context, optional=None):
return optional
self.commands.add("foo", optional=lambda v: sentinel)(func)
assert sentinel == self.commands.call(["foo", "123"])
def test_validator_skips_optional_default(self):
sentinel = object()
def func(context, optional=sentinel):
return optional
self.commands.add("foo", optional=lambda v: None)(func)
assert sentinel == self.commands.call(["foo"])
def test_validator_applied_to_non_existent_arg_fails(self):
self.commands.add("foo")(lambda context, arg: arg)
with self.assertRaises(TypeError):
def func(context, wrong_arg):
return wrong_arg
self.commands.add("bar", arg=lambda v: v)(func)
def test_validator_called_context_fails(self):
return
with self.assertRaises(TypeError):
def func(context):
pass
self.commands.add("bar", context=lambda v: v)(func)
def test_validator_value_error_is_converted(self):
def validdate(value):
raise ValueError
def func(context, arg):
pass
self.commands.add("bar", arg=validdate)(func)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(["bar", "test"])
def test_auth_required_gets_stored(self):
def func1(context):
pass
def func2(context):
pass
self.commands.add("foo")(func1)
self.commands.add("bar", auth_required=False)(func2)
assert self.commands.handlers["foo"].auth_required
assert not self.commands.handlers["bar"].auth_required
def test_list_command_gets_stored(self):
def func1(context):
pass
def func2(context):
pass
self.commands.add("foo")(func1)
self.commands.add("bar", list_command=False)(func2)
assert self.commands.handlers["foo"].list_command
assert not self.commands.handlers["bar"].list_command
| true | true |
1c4730ff99573a9d528ea79ff21a35220231baae | 1,563 | py | Python | tests/test_healpix_binning.py | erykoff/skyproj | f00af06df032c6956e9ce191b55b173eb5415b3a | [
"BSD-3-Clause"
] | 6 | 2022-02-22T15:44:35.000Z | 2022-03-31T17:14:18.000Z | tests/test_healpix_binning.py | erykoff/skyproj | f00af06df032c6956e9ce191b55b173eb5415b3a | [
"BSD-3-Clause"
] | 15 | 2022-01-11T22:06:16.000Z | 2022-03-07T21:49:48.000Z | tests/test_healpix_binning.py | LSSTDESC/skyproj | c1e3365e958b2bd99e72e4e053da6b0ddaceb2b2 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
import healpy as hp
import matplotlib
matplotlib.use("Agg")
from matplotlib.testing.compare import compare_images, ImageComparisonFailure # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
import skyproj # noqa: E402
ROOT = os.path.abspath(os.path.dirname(__file__))
def test_healpix_binning(tmp_path):
"""Test healpix binning functionality."""
plt.rcParams.update(plt.rcParamsDefault)
np.random.seed(1234)
ra = np.random.uniform(low=30.0, high=40.0, size=10000)
dec = np.random.uniform(low=45.0, high=55.0, size=10000)
C = np.random.uniform(low=0.0, high=10.0, size=10000)
fig = plt.figure(1, figsize=(8, 5))
fig.clf()
ax = fig.add_subplot(111)
sp = skyproj.McBrydeSkyproj(ax=ax)
hpxmap, im, lon_raster, lat_raster, values_raster = sp.draw_hpxbin(ra, dec)
# Spot-check a pixel
pix = hp.ang2pix(hp.npix2nside(hpxmap.size), ra, dec, lonlat=True)
test, = np.where(pix == 87864)
assert(hpxmap[87864] == test.size)
fname = 'hpxbin.png'
fig.savefig(tmp_path / fname)
err = compare_images(os.path.join(ROOT, 'data', fname), tmp_path / fname, 40.0)
if err:
raise ImageComparisonFailure(err)
# Redo with averaging over values
fig = plt.figure(1, figsize=(8, 5))
fig.clf()
ax = fig.add_subplot(111)
sp = skyproj.McBrydeSkyproj(ax=ax)
hpxmap, im, lon_raster, lat_raster, values_raster = sp.draw_hpxbin(ra, dec, C=C)
# Spot-check the pixel
np.testing.assert_approx_equal(hpxmap[87864], np.mean(C[test]))
| 28.944444 | 91 | 0.684581 | import os
import numpy as np
import healpy as hp
import matplotlib
matplotlib.use("Agg")
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
import matplotlib.pyplot as plt
import skyproj
ROOT = os.path.abspath(os.path.dirname(__file__))
def test_healpix_binning(tmp_path):
plt.rcParams.update(plt.rcParamsDefault)
np.random.seed(1234)
ra = np.random.uniform(low=30.0, high=40.0, size=10000)
dec = np.random.uniform(low=45.0, high=55.0, size=10000)
C = np.random.uniform(low=0.0, high=10.0, size=10000)
fig = plt.figure(1, figsize=(8, 5))
fig.clf()
ax = fig.add_subplot(111)
sp = skyproj.McBrydeSkyproj(ax=ax)
hpxmap, im, lon_raster, lat_raster, values_raster = sp.draw_hpxbin(ra, dec)
pix = hp.ang2pix(hp.npix2nside(hpxmap.size), ra, dec, lonlat=True)
test, = np.where(pix == 87864)
assert(hpxmap[87864] == test.size)
fname = 'hpxbin.png'
fig.savefig(tmp_path / fname)
err = compare_images(os.path.join(ROOT, 'data', fname), tmp_path / fname, 40.0)
if err:
raise ImageComparisonFailure(err)
fig = plt.figure(1, figsize=(8, 5))
fig.clf()
ax = fig.add_subplot(111)
sp = skyproj.McBrydeSkyproj(ax=ax)
hpxmap, im, lon_raster, lat_raster, values_raster = sp.draw_hpxbin(ra, dec, C=C)
np.testing.assert_approx_equal(hpxmap[87864], np.mean(C[test]))
| true | true |
1c47316fc571cbecdc8de42cb48fe3b2c354deab | 278 | py | Python | cap2/pangea/constants.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 9 | 2020-07-10T15:45:12.000Z | 2022-01-19T10:44:13.000Z | cap2/pangea/constants.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 14 | 2020-06-15T16:04:54.000Z | 2022-03-12T01:05:47.000Z | cap2/pangea/constants.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 5 | 2021-01-05T01:26:48.000Z | 2022-01-23T11:20:49.000Z |
CAP_WORK_ORDER_PROTO_NAME = 'cap'
WORK_ORDER_PROTOS = {
'metasub_cap_qc': ('fast', '435ffcd4-a582-47d8-97b6-bf4f3a42aec5'), # TODO no harcoded UUIDs
'pre': ('pre', '62a7b78f-cb95-42d6-b956-68b45abe47f5'),
'reads': ('reads', 'f6949311-f60b-44b9-8fe4-22df2060a379'),
} | 34.75 | 94 | 0.697842 |
CAP_WORK_ORDER_PROTO_NAME = 'cap'
WORK_ORDER_PROTOS = {
'metasub_cap_qc': ('fast', '435ffcd4-a582-47d8-97b6-bf4f3a42aec5'),
'pre': ('pre', '62a7b78f-cb95-42d6-b956-68b45abe47f5'),
'reads': ('reads', 'f6949311-f60b-44b9-8fe4-22df2060a379'),
} | true | true |
1c4731b7d21eb89aaf132cf4faedc6cb3e060ee6 | 11,999 | py | Python | ament_cpplint/ament_cpplint/main.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 23 | 2015-07-08T05:42:24.000Z | 2022-03-14T02:13:01.000Z | ament_cpplint/ament_cpplint/main.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 292 | 2015-03-06T20:11:45.000Z | 2022-03-31T22:30:41.000Z | ament_cpplint/ament_cpplint/main.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 71 | 2016-05-24T01:24:54.000Z | 2022-03-23T07:42:41.000Z | #!/usr/bin/env python3
# Copyright 2014-2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import re
import sys
import time
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
from ament_cpplint import cpplint
from ament_cpplint.cpplint import _cpplint_state
from ament_cpplint.cpplint import ParseArguments
from ament_cpplint.cpplint import ProcessFile
# use custom header guard with two underscore between the name parts
def custom_get_header_guard_cpp_variable(filename):
from ament_cpplint.cpplint import _root
from ament_cpplint.cpplint import FileInfo
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
prefix = _root + os.sep
# use consistent separator on Windows
if os.sep != '/':
prefix = prefix.replace(os.sep, '/')
if file_path_from_root.startswith(prefix):
file_path_from_root = file_path_from_root[len(prefix):]
else:
filename = filename.replace(os.sep, '/')
if filename.startswith(prefix):
file_path_from_root = filename[len(prefix):]
# use double separator
file_path_from_root = file_path_from_root.replace('/', '//')
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = custom_get_header_guard_cpp_variable
def main(argv=sys.argv[1:]):
extensions = ['c', 'cc', 'cpp', 'cxx']
headers = ['h', 'hh', 'hpp', 'hxx']
parser = argparse.ArgumentParser(
description='Check code against the Google style conventions using '
'cpplint.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--filters', metavar='FILTER,FILTER,...', type=str,
help='A comma separated list of category filters to apply')
parser.add_argument(
'--linelength', metavar='N', type=int, default=100,
help='The maximum line length')
parser.add_argument(
'--root', type=str,
help='The --root option for cpplint')
parser.add_argument(
'--exclude', default=[],
nargs='*',
help='Exclude C/C++ files from being checked.')
parser.add_argument(
'paths',
nargs='*',
default=[os.curdir],
help='The files or directories to check. For directories files ending '
'in %s will be considered.' %
', '.join(["'.%s'" % e for e in extensions + headers]))
# not using a file handle directly
# in order to prevent leaving an empty file when something fails early
parser.add_argument(
'--xunit-file',
help='Generate a xunit compliant XML file')
args = parser.parse_args(argv)
if args.xunit_file:
start_time = time.time()
argv = []
# collect category based counts
argv.append('--counting=detailed')
argv.append('--extensions=%s' % ','.join(extensions))
argv.append('--headers=%s' % ','.join(headers))
filters = [
# we do allow C++11
'-build/c++11',
# we consider passing non-const references to be ok
'-runtime/references',
# we wrap open curly braces for namespaces, classes and functions
'-whitespace/braces',
# we don't indent keywords like public, protected and private with one space
'-whitespace/indent',
# we allow closing parenthesis to be on the next line
'-whitespace/parens',
# we allow the developer to decide about whitespace after a semicolon
'-whitespace/semicolon',
]
if args.filters:
filters += args.filters.split(',')
argv.append('--filter=%s' % ','.join(filters))
argv.append('--linelength=%d' % args.linelength)
groups = get_file_groups(args.paths, extensions + headers, args.exclude)
if not groups:
print('No files found', file=sys.stderr)
return 1
# hook into error reporting
DefaultError = cpplint.Error # noqa: N806
report = []
# invoke cpplint for each root group of files
_cpplint_state.ResetErrorCounts()
for root in sorted(groups.keys()):
files = groups[root]
arguments = list(argv)
if args.root:
root = os.path.abspath(args.root)
if root:
root_arg = '--root=%s' % root
arguments.append(root_arg)
print("Using '%s' argument" % root_arg)
else:
print("Not using '--root'")
print('')
arguments += files
filenames = ParseArguments(arguments)
for filename in filenames:
# hook into error reporting
errors = []
def custom_error(filename, linenum, category, confidence, message):
if cpplint._ShouldPrintError(category, confidence, linenum):
errors.append({
'linenum': linenum,
'category': category,
'confidence': confidence,
'message': message,
})
DefaultError(filename, linenum, category, confidence, message)
cpplint.Error = custom_error
ProcessFile(filename, _cpplint_state.verbose_level)
report.append((filename, errors))
print('')
# output summary
for category in sorted(_cpplint_state.errors_by_category.keys()):
count = _cpplint_state.errors_by_category[category]
print("Category '%s' errors found: %d" % (category, count),
file=sys.stderr)
if _cpplint_state.error_count:
print('Total errors found: %d' % _cpplint_state.error_count,
file=sys.stderr)
else:
print('No problems found')
# generate xunit file
if args.xunit_file:
folder_name = os.path.basename(os.path.dirname(args.xunit_file))
file_name = os.path.basename(args.xunit_file)
suffix = '.xml'
if file_name.endswith(suffix):
file_name = file_name[0:-len(suffix)]
suffix = '.xunit'
if file_name.endswith(suffix):
file_name = file_name[0:-len(suffix)]
testname = '%s.%s' % (folder_name, file_name)
xml = get_xunit_content(report, testname, time.time() - start_time)
path = os.path.dirname(os.path.abspath(args.xunit_file))
if not os.path.exists(path):
os.makedirs(path)
with open(args.xunit_file, 'w') as f:
f.write(xml)
return 1 if _cpplint_state.error_count else 0
def get_file_groups(paths, extensions, exclude_patterns):
excludes = []
for exclude_pattern in exclude_patterns:
excludes.extend(glob.glob(exclude_pattern))
excludes = {os.path.realpath(x) for x in excludes}
# dict mapping root path to files
groups = {}
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
if 'AMENT_IGNORE' in dirnames + filenames:
dirnames[:] = []
continue
# ignore folder starting with . or _
dirnames[:] = [d for d in dirnames if d[0] not in ['.', '_']]
dirnames.sort()
# select files by extension
for filename in sorted(filenames):
_, ext = os.path.splitext(filename)
if ext in ('.%s' % e for e in extensions):
filepath = os.path.join(dirpath, filename)
if os.path.realpath(filepath) not in excludes:
append_file_to_group(groups, filepath)
if os.path.isfile(path):
if os.path.realpath(path) not in excludes:
append_file_to_group(groups, path)
return groups
def append_file_to_group(groups, path):
path = os.path.abspath(path)
root = ''
# try to determine root from path
base_path = os.path.dirname(path)
# find longest subpath which ends with one of the following subfolder names
subfolder_names = ['include', 'src', 'test']
matches = [
re.search(
'^(.+%s%s)%s' %
(re.escape(os.sep), re.escape(subfolder_name), re.escape(os.sep)), path)
for subfolder_name in subfolder_names]
match_groups = [match.group(1) for match in matches if match]
if match_groups:
match_groups = [{'group_len': len(x), 'group': x} for x in match_groups]
sorted_groups = sorted(match_groups, key=lambda k: k['group_len'])
base_path = sorted_groups[-1]['group']
root = base_path
# try to find repository root
repo_root = None
p = path
while p and repo_root is None:
# abort if root is reached
if os.path.dirname(p) == p:
break
p = os.path.dirname(p)
for marker in ['.git', '.hg', '.svn']:
if os.path.exists(os.path.join(p, marker)):
repo_root = p
break
# compute relative --root argument
if repo_root and repo_root > base_path:
root = os.path.relpath(base_path, repo_root)
# add the path to the appropriate group
if root not in groups:
groups[root] = []
groups[root].append(path)
def get_xunit_content(report, testname, elapsed):
test_count = sum(max(len(r[1]), 1) for r in report)
error_count = sum(len(r[1]) for r in report)
data = {
'testname': testname,
'test_count': test_count,
'error_count': error_count,
'time': '%.3f' % round(elapsed, 3),
}
xml = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite
name="%(testname)s"
tests="%(test_count)d"
failures="%(error_count)d"
errors="0"
time="%(time)s"
>
""" % data
for (filename, errors) in report:
if errors:
# report each cpplint error as a failing testcase
for error in errors:
data = {
'quoted_name': quoteattr(
'%s [%s] (%s:%d)' % (
error['category'], error['confidence'],
filename, error['linenum'])),
'testname': testname,
'quoted_message': quoteattr(error['message']),
}
xml += """ <testcase
name=%(quoted_name)s
classname="%(testname)s"
>
<failure message=%(quoted_message)s/>
</testcase>
""" % data
else:
# if there are no cpplint errors report a single successful test
data = {
'quoted_location': quoteattr(filename),
'testname': testname,
}
xml += """ <testcase
name=%(quoted_location)s
classname="%(testname)s"/>
""" % data
# output list of checked files
data = {
'escaped_files': escape(''.join(['\n* %s' % r[0] for r in report])),
}
xml += """ <system-out>Checked files:%(escaped_files)s</system-out>
""" % data
xml += '</testsuite>\n'
return xml
if __name__ == '__main__':
sys.exit(main())
| 34.479885 | 84 | 0.5958 |
import argparse
import glob
import os
import re
import sys
import time
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
from ament_cpplint import cpplint
from ament_cpplint.cpplint import _cpplint_state
from ament_cpplint.cpplint import ParseArguments
from ament_cpplint.cpplint import ProcessFile
def custom_get_header_guard_cpp_variable(filename):
from ament_cpplint.cpplint import _root
from ament_cpplint.cpplint import FileInfo
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
prefix = _root + os.sep
# use consistent separator on Windows
if os.sep != '/':
prefix = prefix.replace(os.sep, '/')
if file_path_from_root.startswith(prefix):
file_path_from_root = file_path_from_root[len(prefix):]
else:
filename = filename.replace(os.sep, '/')
if filename.startswith(prefix):
file_path_from_root = filename[len(prefix):]
# use double separator
file_path_from_root = file_path_from_root.replace('/', '//')
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = custom_get_header_guard_cpp_variable
def main(argv=sys.argv[1:]):
extensions = ['c', 'cc', 'cpp', 'cxx']
headers = ['h', 'hh', 'hpp', 'hxx']
parser = argparse.ArgumentParser(
description='Check code against the Google style conventions using '
'cpplint.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--filters', metavar='FILTER,FILTER,...', type=str,
help='A comma separated list of category filters to apply')
parser.add_argument(
'--linelength', metavar='N', type=int, default=100,
help='The maximum line length')
parser.add_argument(
'--root', type=str,
help='The --root option for cpplint')
parser.add_argument(
'--exclude', default=[],
nargs='*',
help='Exclude C/C++ files from being checked.')
parser.add_argument(
'paths',
nargs='*',
default=[os.curdir],
help='The files or directories to check. For directories files ending '
'in %s will be considered.' %
', '.join(["'.%s'" % e for e in extensions + headers]))
# not using a file handle directly
# in order to prevent leaving an empty file when something fails early
parser.add_argument(
'--xunit-file',
help='Generate a xunit compliant XML file')
args = parser.parse_args(argv)
if args.xunit_file:
start_time = time.time()
argv = []
# collect category based counts
argv.append('--counting=detailed')
argv.append('--extensions=%s' % ','.join(extensions))
argv.append('--headers=%s' % ','.join(headers))
filters = [
# we do allow C++11
'-build/c++11',
# we consider passing non-const references to be ok
'-runtime/references',
# we wrap open curly braces for namespaces, classes and functions
'-whitespace/braces',
# we don't indent keywords like public, protected and private with one space
'-whitespace/indent',
'-whitespace/parens',
'-whitespace/semicolon',
]
if args.filters:
filters += args.filters.split(',')
argv.append('--filter=%s' % ','.join(filters))
argv.append('--linelength=%d' % args.linelength)
groups = get_file_groups(args.paths, extensions + headers, args.exclude)
if not groups:
print('No files found', file=sys.stderr)
return 1
DefaultError = cpplint.Error
report = []
_cpplint_state.ResetErrorCounts()
for root in sorted(groups.keys()):
files = groups[root]
arguments = list(argv)
if args.root:
root = os.path.abspath(args.root)
if root:
root_arg = '--root=%s' % root
arguments.append(root_arg)
print("Using '%s' argument" % root_arg)
else:
print("Not using '--root'")
print('')
arguments += files
filenames = ParseArguments(arguments)
for filename in filenames:
errors = []
def custom_error(filename, linenum, category, confidence, message):
if cpplint._ShouldPrintError(category, confidence, linenum):
errors.append({
'linenum': linenum,
'category': category,
'confidence': confidence,
'message': message,
})
DefaultError(filename, linenum, category, confidence, message)
cpplint.Error = custom_error
ProcessFile(filename, _cpplint_state.verbose_level)
report.append((filename, errors))
print('')
for category in sorted(_cpplint_state.errors_by_category.keys()):
count = _cpplint_state.errors_by_category[category]
print("Category '%s' errors found: %d" % (category, count),
file=sys.stderr)
if _cpplint_state.error_count:
print('Total errors found: %d' % _cpplint_state.error_count,
file=sys.stderr)
else:
print('No problems found')
if args.xunit_file:
folder_name = os.path.basename(os.path.dirname(args.xunit_file))
file_name = os.path.basename(args.xunit_file)
suffix = '.xml'
if file_name.endswith(suffix):
file_name = file_name[0:-len(suffix)]
suffix = '.xunit'
if file_name.endswith(suffix):
file_name = file_name[0:-len(suffix)]
testname = '%s.%s' % (folder_name, file_name)
xml = get_xunit_content(report, testname, time.time() - start_time)
path = os.path.dirname(os.path.abspath(args.xunit_file))
if not os.path.exists(path):
os.makedirs(path)
with open(args.xunit_file, 'w') as f:
f.write(xml)
return 1 if _cpplint_state.error_count else 0
def get_file_groups(paths, extensions, exclude_patterns):
excludes = []
for exclude_pattern in exclude_patterns:
excludes.extend(glob.glob(exclude_pattern))
excludes = {os.path.realpath(x) for x in excludes}
groups = {}
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
if 'AMENT_IGNORE' in dirnames + filenames:
dirnames[:] = []
continue
dirnames[:] = [d for d in dirnames if d[0] not in ['.', '_']]
dirnames.sort()
for filename in sorted(filenames):
_, ext = os.path.splitext(filename)
if ext in ('.%s' % e for e in extensions):
filepath = os.path.join(dirpath, filename)
if os.path.realpath(filepath) not in excludes:
append_file_to_group(groups, filepath)
if os.path.isfile(path):
if os.path.realpath(path) not in excludes:
append_file_to_group(groups, path)
return groups
def append_file_to_group(groups, path):
path = os.path.abspath(path)
root = ''
base_path = os.path.dirname(path)
subfolder_names = ['include', 'src', 'test']
matches = [
re.search(
'^(.+%s%s)%s' %
(re.escape(os.sep), re.escape(subfolder_name), re.escape(os.sep)), path)
for subfolder_name in subfolder_names]
match_groups = [match.group(1) for match in matches if match]
if match_groups:
match_groups = [{'group_len': len(x), 'group': x} for x in match_groups]
sorted_groups = sorted(match_groups, key=lambda k: k['group_len'])
base_path = sorted_groups[-1]['group']
root = base_path
repo_root = None
p = path
while p and repo_root is None:
if os.path.dirname(p) == p:
break
p = os.path.dirname(p)
for marker in ['.git', '.hg', '.svn']:
if os.path.exists(os.path.join(p, marker)):
repo_root = p
break
if repo_root and repo_root > base_path:
root = os.path.relpath(base_path, repo_root)
if root not in groups:
groups[root] = []
groups[root].append(path)
def get_xunit_content(report, testname, elapsed):
test_count = sum(max(len(r[1]), 1) for r in report)
error_count = sum(len(r[1]) for r in report)
data = {
'testname': testname,
'test_count': test_count,
'error_count': error_count,
'time': '%.3f' % round(elapsed, 3),
}
xml = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite
name="%(testname)s"
tests="%(test_count)d"
failures="%(error_count)d"
errors="0"
time="%(time)s"
>
""" % data
for (filename, errors) in report:
if errors:
for error in errors:
data = {
'quoted_name': quoteattr(
'%s [%s] (%s:%d)' % (
error['category'], error['confidence'],
filename, error['linenum'])),
'testname': testname,
'quoted_message': quoteattr(error['message']),
}
xml += """ <testcase
name=%(quoted_name)s
classname="%(testname)s"
>
<failure message=%(quoted_message)s/>
</testcase>
""" % data
else:
data = {
'quoted_location': quoteattr(filename),
'testname': testname,
}
xml += """ <testcase
name=%(quoted_location)s
classname="%(testname)s"/>
""" % data
data = {
'escaped_files': escape(''.join(['\n* %s' % r[0] for r in report])),
}
xml += """ <system-out>Checked files:%(escaped_files)s</system-out>
""" % data
xml += '</testsuite>\n'
return xml
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c4731f7bfa323ac3060b8de0e3d2a54c42b64d4 | 91 | py | Python | docs/tests/E0101.py | mrfyda/codacy-pylint-python3 | e360f6c0407edebe274835d3a881d67e96adf8ba | [
"Apache-2.0"
] | 17 | 2016-01-26T13:30:04.000Z | 2022-03-06T21:11:42.000Z | docs/tests/E0101.py | mrfyda/codacy-pylint-python3 | e360f6c0407edebe274835d3a881d67e96adf8ba | [
"Apache-2.0"
] | 50 | 2019-08-14T16:14:45.000Z | 2022-03-31T11:00:50.000Z | docs/tests/E0101.py | mrfyda/codacy-pylint-python3 | e360f6c0407edebe274835d3a881d67e96adf8ba | [
"Apache-2.0"
] | 15 | 2015-11-18T12:18:50.000Z | 2021-01-17T22:21:41.000Z | ##Patterns: E0101
class Test():
##Err: E0101
def __init__(self):
return 1 | 13 | 23 | 0.571429 |
init__(self):
return 1 | true | true |
1c47321c926529faca5535e6541d02617674a33d | 4,340 | py | Python | lg_media/scripts/browser_launcher.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | [
"Apache-2.0"
] | 16 | 2015-10-10T11:55:37.000Z | 2022-02-24T22:47:48.000Z | lg_media/scripts/browser_launcher.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | [
"Apache-2.0"
] | 292 | 2015-09-29T21:59:53.000Z | 2022-03-31T15:59:31.000Z | lg_media/scripts/browser_launcher.py | constantegonzalez/lg_ros_nodes | 1c7b08c42e90205922602c86805285508d1b7971 | [
"Apache-2.0"
] | 5 | 2017-05-03T06:22:43.000Z | 2021-08-19T16:54:14.000Z | #!/usr/bin/env python3
import rospy
from lg_msg_defs.msg import AdhocBrowsers, AdhocBrowser
from lg_common import AdhocBrowserPool
from lg_msg_defs.msg import AdhocMedias
from lg_common.helpers import add_url_params, make_soft_relaunch_callback
from urllib.request import url2pathname
from lg_common.helpers import run_with_influx_exception_handler
VIDEOSYNC_URL = 'http://localhost:8008/lg_media/webapps/videosync/index.html'
NODE_NAME = 'lg_media_browser_launcher'
class BasicBrowserData:
def __init__(self, publisher, leader, ros_port, ros_host, url, sync_rate,
frame_latency, ping_interval, hard_sync_diff,
min_playbackrate, max_playbackrate, autoplay, show_controls,
viewport_name):
self.publisher = publisher
self.leader = leader
self.show_controls = show_controls
self.autoplay = autoplay
self.ros_port = ros_port
self.ros_host = ros_host
self.url = url
self.sync_rate = sync_rate
self.frame_latency = frame_latency
self.ping_interval = ping_interval
self.hard_sync_diff = hard_sync_diff
self.min_playbackrate = min_playbackrate
self.max_playbackrate = max_playbackrate
self.viewport_name = viewport_name
def launch_browser(self, data):
"""
data: AdhocMedias, which is a list of AdhocMedia objects
Turns these medias into AdhocBrowsers and then publishes them
"""
msg = AdhocBrowsers()
for media in data.medias:
url = add_url_params(
self.url,
videoUrl=media.url,
master=self.leader,
loop=media.loop,
sync=True,
)
url = url2pathname(url)
rospy.logdebug('url for media: %s' % url)
new_browser = AdhocBrowser()
new_browser.id = 'adhoc_media_browser_%s' % self.viewport_name
new_browser.geometry = media.geometry
new_browser.url = url
msg.browsers.append(new_browser)
rospy.loginfo("New browser URL: %s" % url)
self.publisher.publish(msg)
def main():
rospy.init_node(NODE_NAME)
viewport_name = rospy.get_param('~viewport', None)
if not viewport_name:
msg = "Viewport not configured for lg_media browser_launcher - exiting"
rospy.logerr(msg)
exit(1)
browser_pool_publisher = rospy.Publisher('/media_service/launch_browser/%s' % viewport_name,
AdhocBrowsers, queue_size=10)
is_leader = str(rospy.get_param('~leader', False)).lower()
ros_port = str(rospy.get_param('~ros_port', '9090'))
ros_host = str(rospy.get_param('~ros_host', 'localhost'))
url = str(rospy.get_param('~videosync_url', VIDEOSYNC_URL))
sync_rate = str(rospy.get_param('~sync_rate', 60))
frame_latency = str(rospy.get_param('~frame_latency', 3 / 25))
ping_interval = str(rospy.get_param('~ping_interval', 1000))
hard_sync_diff = str(rospy.get_param('~hard_sync_diff', 1.0))
min_playbackrate = str(rospy.get_param('~min_playbackrate', 0.5))
max_playbackrate = str(rospy.get_param('~max_playbackrate', 1.5))
autoplay = str(rospy.get_param('~autoplay', False)).lower()
show_controls = str(rospy.get_param('~show_controls', False)).lower()
basic_browser_data = BasicBrowserData(browser_pool_publisher, is_leader,
ros_port, ros_host, url, sync_rate,
frame_latency, ping_interval,
hard_sync_diff, min_playbackrate,
max_playbackrate, autoplay,
show_controls, viewport_name)
browser_pool = AdhocBrowserPool(viewport_name)
make_soft_relaunch_callback(browser_pool.handle_soft_relaunch, groups=["media"])
rospy.Subscriber('/media_service/browser/%s' % viewport_name, AdhocMedias,
basic_browser_data.launch_browser)
rospy.Subscriber('/media_service/launch_browser/%s' % viewport_name, AdhocBrowsers,
browser_pool.handle_ros_message)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| 40.560748 | 96 | 0.647696 |
import rospy
from lg_msg_defs.msg import AdhocBrowsers, AdhocBrowser
from lg_common import AdhocBrowserPool
from lg_msg_defs.msg import AdhocMedias
from lg_common.helpers import add_url_params, make_soft_relaunch_callback
from urllib.request import url2pathname
from lg_common.helpers import run_with_influx_exception_handler
VIDEOSYNC_URL = 'http://localhost:8008/lg_media/webapps/videosync/index.html'
NODE_NAME = 'lg_media_browser_launcher'
class BasicBrowserData:
    """Hold videosync configuration and translate AdhocMedias requests
    into AdhocBrowsers messages published on the browser-pool topic."""

    def __init__(self, publisher, leader, ros_port, ros_host, url, sync_rate,
                 frame_latency, ping_interval, hard_sync_diff,
                 min_playbackrate, max_playbackrate, autoplay, show_controls,
                 viewport_name):
        # publisher: rospy.Publisher for AdhocBrowsers messages.
        self.publisher = publisher
        # leader: stringified bool - whether this node is the sync master.
        self.leader = leader
        self.show_controls = show_controls
        self.autoplay = autoplay
        self.ros_port = ros_port
        self.ros_host = ros_host
        # url: base URL of the videosync web application.
        self.url = url
        self.sync_rate = sync_rate
        self.frame_latency = frame_latency
        self.ping_interval = ping_interval
        self.hard_sync_diff = hard_sync_diff
        self.min_playbackrate = min_playbackrate
        self.max_playbackrate = max_playbackrate
        self.viewport_name = viewport_name

    def launch_browser(self, data):
        """Build one AdhocBrowser per media item in *data* and publish them
        all in a single AdhocBrowsers message.

        data: AdhocMedias message containing the media items to show.
        """
        msg = AdhocBrowsers()
        for media in data.medias:
            # Encode per-media playback settings as query parameters of the
            # videosync page URL.
            url = add_url_params(
                self.url,
                videoUrl=media.url,
                master=self.leader,
                loop=media.loop,
                sync=True,
            )
            # NOTE(review): url2pathname appears to be used to undo percent
            # escaping in the composed URL - confirm this is intended.
            url = url2pathname(url)
            rospy.logdebug('url for media: %s' % url)
            new_browser = AdhocBrowser()
            new_browser.id = 'adhoc_media_browser_%s' % self.viewport_name
            new_browser.geometry = media.geometry
            new_browser.url = url
            msg.browsers.append(new_browser)
            rospy.loginfo("New browser URL: %s" % url)
        self.publisher.publish(msg)
def main():
    """Initialise the node, read its parameters and connect the topics.

    Requires the ``~viewport`` parameter; exits with status 1 otherwise.
    """
    rospy.init_node(NODE_NAME)
    viewport_name = rospy.get_param('~viewport', None)
    if not viewport_name:
        msg = "Viewport not configured for lg_media browser_launcher - exiting"
        rospy.logerr(msg)
        exit(1)
    # Messages published here are consumed by the AdhocBrowserPool below.
    browser_pool_publisher = rospy.Publisher('/media_service/launch_browser/%s' % viewport_name,
                                             AdhocBrowsers, queue_size=10)
    # Every value is converted to str (bools lower-cased); presumably these
    # become URL query parameters for the videosync page - TODO confirm.
    is_leader = str(rospy.get_param('~leader', False)).lower()
    ros_port = str(rospy.get_param('~ros_port', '9090'))
    ros_host = str(rospy.get_param('~ros_host', 'localhost'))
    url = str(rospy.get_param('~videosync_url', VIDEOSYNC_URL))
    sync_rate = str(rospy.get_param('~sync_rate', 60))
    frame_latency = str(rospy.get_param('~frame_latency', 3 / 25))  # 0.12s default
    ping_interval = str(rospy.get_param('~ping_interval', 1000))
    hard_sync_diff = str(rospy.get_param('~hard_sync_diff', 1.0))
    min_playbackrate = str(rospy.get_param('~min_playbackrate', 0.5))
    max_playbackrate = str(rospy.get_param('~max_playbackrate', 1.5))
    autoplay = str(rospy.get_param('~autoplay', False)).lower()
    show_controls = str(rospy.get_param('~show_controls', False)).lower()
    basic_browser_data = BasicBrowserData(browser_pool_publisher, is_leader,
                                          ros_port, ros_host, url, sync_rate,
                                          frame_latency, ping_interval,
                                          hard_sync_diff, min_playbackrate,
                                          max_playbackrate, autoplay,
                                          show_controls, viewport_name)
    browser_pool = AdhocBrowserPool(viewport_name)
    # Rebuild pooled browsers on a soft relaunch of the "media" group.
    make_soft_relaunch_callback(browser_pool.handle_soft_relaunch, groups=["media"])
    # Media requests in -> browser launch messages out -> browser windows.
    rospy.Subscriber('/media_service/browser/%s' % viewport_name, AdhocMedias,
                     basic_browser_data.launch_browser)
    rospy.Subscriber('/media_service/launch_browser/%s' % viewport_name, AdhocBrowsers,
                     browser_pool.handle_ros_message)
    rospy.spin()


if __name__ == '__main__':
    run_with_influx_exception_handler(main, NODE_NAME)
| true | true |
1c4732f965a4bbbf1360a297ce9591045ac03ef5 | 3,902 | py | Python | chesstab/gui/cqlrow.py | RogerMarsh/chesstab | 01d375dc6bf025b621612a84513e55c4640a78ad | [
"BSD-3-Clause"
] | null | null | null | chesstab/gui/cqlrow.py | RogerMarsh/chesstab | 01d375dc6bf025b621612a84513e55c4640a78ad | [
"BSD-3-Clause"
] | null | null | null | chesstab/gui/cqlrow.py | RogerMarsh/chesstab | 01d375dc6bf025b621612a84513e55c4640a78ad | [
"BSD-3-Clause"
] | null | null | null | # cqlrow.py
# Copyright 2016 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Create widgets to display Chess Query Language (ChessQL) statement records.
"""
import tkinter
from solentware_grid.gui.datarow import (
GRID_COLUMNCONFIGURE,
GRID_CONFIGURE,
WIDGET_CONFIGURE,
WIDGET,
ROW,
)
from .datarow import DataRow
from ..core.chessrecord import ChessDBrecordPartial
from .cqldbedit import CQLDbEdit
from .cqldbdelete import CQLDbDelete
from .cqldbshow import CQLDbShow
from . import constants
ON_DISPLAY_COLOUR = "#eba610" # a pale orange
class ChessDBrowCQL(ChessDBrecordPartial, DataRow):
    """Define row in list of ChessQL statements.

    Add row methods to the ChessQL statement record definition.
    """

    # Single "Description" column header taking the full grid width.
    header_specification = [
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text="Description",
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="pp"),
            ROW: 0,
        },
    ]

    def __init__(self, database=None, ui=None):
        """Extend and associate record definition with database.

        database - the open database that is source of row data
        ui - the ChessUI instance
        """
        super().__init__()
        self.ui = ui
        self.set_database(database)
        # One label per row showing the statement description.
        self.row_specification = [
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
                ROW: 0,
            },
        ]

    def show_row(self, dialog, oldobject):
        """Return a CQLDbShow dialog for instance.

        dialog - a Toplevel
        oldobject - a ChessDBrecordPartial containing original data
        """
        return CQLDbShow(dialog, oldobject, ui=self.ui)

    def delete_row(self, dialog, oldobject):
        """Return a CQLDbDelete dialog for instance.

        dialog - a Toplevel
        oldobject - a ChessDBrecordPartial containing original data
        """
        return CQLDbDelete(dialog, oldobject, ui=self.ui)

    def edit_row(self, dialog, newobject, oldobject, showinitial=True):
        """Return a CQLDbEdit dialog for instance.

        dialog - a Toplevel
        newobject - a ChessDBrecordPartial containing original data to be
                    edited
        oldobject - a ChessDBrecordPartial containing original data
        showinitial == True - show both original and edited data
        """
        return CQLDbEdit(
            newobject, dialog, oldobject, showinitial=showinitial, ui=self.ui
        )

    def grid_row(self, **kargs):
        """Return super().grid_row(textitems=(...), **kargs).

        Create textitems argument for ChessDBrowCQL instance.
        """
        return super().grid_row(
            textitems=(
                self.value.get_name_text(),
                # self.value.get_selection_rule_text(),
            ),
            **kargs
        )

    def grid_row_on_display(self, **kargs):
        """Return grid_row() with the on-display background colour applied."""
        self._current_row_background = ON_DISPLAY_COLOUR
        return self.grid_row(background=ON_DISPLAY_COLOUR, **kargs)

    def set_background_on_display(self, widgets):
        """Set background of widgets to the on-display colour."""
        self._current_row_background = ON_DISPLAY_COLOUR
        self.set_background(widgets, self._current_row_background)
def make_ChessDBrowCQL(chessui):
    """Return a ChessDBrowCQL factory bound to the given ChessUI instance."""

    def make_selection(database=None):
        """Create a ChessDBrowCQL row for database with the bound ChessUI."""
        return ChessDBrowCQL(ui=chessui, database=database)

    return make_selection
| 28.275362 | 78 | 0.6143 |
import tkinter
from solentware_grid.gui.datarow import (
GRID_COLUMNCONFIGURE,
GRID_CONFIGURE,
WIDGET_CONFIGURE,
WIDGET,
ROW,
)
from .datarow import DataRow
from ..core.chessrecord import ChessDBrecordPartial
from .cqldbedit import CQLDbEdit
from .cqldbdelete import CQLDbDelete
from .cqldbshow import CQLDbShow
from . import constants
ON_DISPLAY_COLOUR = "#eba610"  # a pale orange marking rows currently on display


class ChessDBrowCQL(ChessDBrecordPartial, DataRow):
    """Row in the list of ChessQL statements.

    Combines the ChessQL record definition with DataRow display methods.
    """

    # Grid header: one "Description" column taking the full width.
    header_specification = [
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text="Description",
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="pp"),
            ROW: 0,
        },
    ]

    def __init__(self, database=None, ui=None):
        """Associate the record definition with database and the ChessUI."""
        super().__init__()
        self.ui = ui
        self.set_database(database)
        # One label per row showing the statement description.
        self.row_specification = [
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
                ROW: 0,
            },
        ]

    def show_row(self, dialog, oldobject):
        """Return a CQLDbShow dialog displaying oldobject inside dialog."""
        return CQLDbShow(dialog, oldobject, ui=self.ui)

    def delete_row(self, dialog, oldobject):
        """Return a CQLDbDelete dialog for oldobject inside dialog."""
        return CQLDbDelete(dialog, oldobject, ui=self.ui)

    def edit_row(self, dialog, newobject, oldobject, showinitial=True):
        """Return a CQLDbEdit dialog editing newobject (copied from
        oldobject); showinitial shows original and edited data together."""
        return CQLDbEdit(
            newobject, dialog, oldobject, showinitial=showinitial, ui=self.ui
        )

    def grid_row(self, **kargs):
        """Return super().grid_row() with this record's display text."""
        return super().grid_row(
            textitems=(
                self.value.get_name_text(),
            ),
            **kargs
        )

    def grid_row_on_display(self, **kargs):
        """Return grid_row() using the on-display background colour."""
        self._current_row_background = ON_DISPLAY_COLOUR
        return self.grid_row(background=ON_DISPLAY_COLOUR, **kargs)

    def set_background_on_display(self, widgets):
        """Recolour widgets with the on-display background."""
        self._current_row_background = ON_DISPLAY_COLOUR
        self.set_background(widgets, self._current_row_background)
def make_ChessDBrowCQL(chessui):
    """Return a factory producing ChessDBrowCQL rows tied to chessui."""
    def make_selection(database=None):
        # chessui is captured by the closure so grid code only needs to
        # supply the database.
        return ChessDBrowCQL(database=database, ui=chessui)
    return make_selection
| true | true |
1c4733f6e497d4ac869d53a3d5bd699077b4b8f3 | 1,267 | py | Python | wav2rec/_utils/printing.py | TariqAHassan/wav2rec | 8d3f33291f246d80a4935cf7aa2cc75f110d9c15 | [
"MIT"
] | 10 | 2021-11-12T03:58:05.000Z | 2022-02-19T08:13:30.000Z | wav2rec/_utils/printing.py | TariqAHassan/wav2rec | 8d3f33291f246d80a4935cf7aa2cc75f110d9c15 | [
"MIT"
] | null | null | null | wav2rec/_utils/printing.py | TariqAHassan/wav2rec | 8d3f33291f246d80a4935cf7aa2cc75f110d9c15 | [
"MIT"
] | 1 | 2021-11-12T03:58:05.000Z | 2021-11-12T03:58:05.000Z | """
Printing Utils
References:
* https://github.com/TariqAHassan/alsek/blob/master/tests/_utils/test_printing.py
"""
from datetime import datetime
from typing import Any, Dict, Optional
def _format_value(value: Any) -> Any:
if isinstance(value, (str, datetime)):
return f"'{value}'"
else:
return value
def _format_params(params: Dict[str, Any], join_on: str) -> str:
return join_on.join((f"{k}={_format_value(v)}" for k, v in params.items()))
def auto_repr(obj: object, new_line_threshold: Optional[int] = 5, **params: Any) -> str:
"""Autogenerate a class repr string.
Args:
obj (object): an object to generate a repr for
new_line_threshold (int, optional): number of ``params``
required to split the parameters over multiple lines.
**params (Keyword Args): parameters to include in the
repr string
Returns:
repr (str): repr string
"""
class_name = obj.__class__.__name__
if new_line_threshold is None or len(params) <= new_line_threshold:
start, join_on, end = "", ", ", ""
else:
start, join_on, end = "\n ", ",\n ", "\n"
return f"{class_name}({start}{_format_params(params, join_on=join_on)}{end})"
| 28.795455 | 89 | 0.631413 | from datetime import datetime
from typing import Any, Dict, Optional
def _format_value(value: Any) -> Any:
    """Return value quoted when it is a str or datetime, unchanged otherwise."""
    if isinstance(value, (str, datetime)):
        return f"'{value}'"
    else:
        return value


def _format_params(params: Dict[str, Any], join_on: str) -> str:
    """Join params as ``key=value`` pairs using join_on as separator."""
    return join_on.join((f"{k}={_format_value(v)}" for k, v in params.items()))


def auto_repr(obj: object, new_line_threshold: Optional[int] = 5, **params: Any) -> str:
    """Autogenerate a repr string for obj built from the given parameters.

    When more than new_line_threshold parameters are given (and the
    threshold is not None) each parameter goes on its own indented line.
    """
    class_name = obj.__class__.__name__
    if new_line_threshold is None or len(params) <= new_line_threshold:
        start, join_on, end = "", ", ", ""
    else:
        start, join_on, end = "\n    ", ",\n    ", "\n"
    return f"{class_name}({start}{_format_params(params, join_on=join_on)}{end})"
| true | true |
1c4734480bf8310ef21253d0538b10a554dce1b8 | 1,105 | py | Python | phabricator/komand_phabricator/actions/status/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | phabricator/komand_phabricator/actions/status/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | phabricator/komand_phabricator/actions/status/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    # Keys of the action's input parameters (mirror the JSON schema below).
    ID = "id"
    STATUS = "status"


class Output:
    # Key of the action's single output value.
    MESSAGE = "message"


class StatusInput(komand.Input):
    # JSON schema describing the action input; generated by the Komand SDK,
    # so regenerate rather than hand-edit.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "id": {
      "type": "string",
      "title": "ID",
      "description": "Task ID",
      "order": 1
    },
    "status": {
      "type": "string",
      "title": "Status",
      "description": "Status name [Open|Resolved|Wontfix|Invalid|Spite]",
      "order": 2
    }
  },
  "required": [
    "id",
    "status"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)


class StatusOutput(komand.Output):
    # JSON schema describing the action output (generated by the Komand SDK).
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "message": {
      "type": "string",
      "title": "Message",
      "description": "When user is assigned message is: Status changed",
      "order": 1
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| 17.539683 | 73 | 0.538462 |
import komand
import json
class Input:
    # Input parameter keys for the "status" action.
    ID = "id"
    STATUS = "status"


class Output:
    # Output key for the "status" action.
    MESSAGE = "message"


class StatusInput(komand.Input):
    # SDK-generated input schema - do not hand-edit; regenerate instead.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "id": {
      "type": "string",
      "title": "ID",
      "description": "Task ID",
      "order": 1
    },
    "status": {
      "type": "string",
      "title": "Status",
      "description": "Status name [Open|Resolved|Wontfix|Invalid|Spite]",
      "order": 2
    }
  },
  "required": [
    "id",
    "status"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)


class StatusOutput(komand.Output):
    # SDK-generated output schema - do not hand-edit; regenerate instead.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "message": {
      "type": "string",
      "title": "Message",
      "description": "When user is assigned message is: Status changed",
      "order": 1
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| true | true |
1c47366623f38c145fc92b7022d4d60a8eea6ccf | 248 | py | Python | dslrpp/analysis/__init__.py | s-jevtic/DSLR-photometry-pipeline | d239b48ab13c3c95ff8da363ea90ac6dbde1efe5 | [
"MIT"
] | null | null | null | dslrpp/analysis/__init__.py | s-jevtic/DSLR-photometry-pipeline | d239b48ab13c3c95ff8da363ea90ac6dbde1efe5 | [
"MIT"
] | null | null | null | dslrpp/analysis/__init__.py | s-jevtic/DSLR-photometry-pipeline | d239b48ab13c3c95ff8da363ea90ac6dbde1efe5 | [
"MIT"
] | null | null | null | """
"""
from .photometry import SNR, instrumental_flux, lightcurve, save_lcData
from .period import periodogram, est_period
__all__ = [
"SNR", "instrumental_flux", "lightcurve", "save_lcData",
"periodogram", "est_period",
]
| 27.555556 | 71 | 0.673387 | from .photometry import SNR, instrumental_flux, lightcurve, save_lcData
from .period import periodogram, est_period
__all__ = [
"SNR", "instrumental_flux", "lightcurve", "save_lcData",
"periodogram", "est_period",
]
| true | true |
1c473726b38cc42504f78969a255f497d6a4c91b | 7,808 | py | Python | ropper/loaders/loader.py | cbayet/Ropper | 66adeb0a1d4322ced69643c3be2552c057d116d2 | [
"BSD-3-Clause"
] | 1,502 | 2015-01-07T09:11:08.000Z | 2022-03-29T10:08:26.000Z | ropper/loaders/loader.py | cbayet/Ropper | 66adeb0a1d4322ced69643c3be2552c057d116d2 | [
"BSD-3-Clause"
] | 126 | 2015-03-10T15:32:26.000Z | 2022-03-03T08:30:10.000Z | ropper/loaders/loader.py | cbayet/Ropper | 66adeb0a1d4322ced69643c3be2552c057d116d2 | [
"BSD-3-Clause"
] | 214 | 2015-03-10T00:17:16.000Z | 2022-03-19T07:04:08.000Z | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" A ND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ropper.common.abstract import *
from ctypes import *
from ropper.common.enum import Enum
from struct import pack_into
from ropper.common.error import *
from ropper.arch import *
from hashlib import sha256
import re
class Type(Enum):
    # Binary container formats recognised by the loaders; RAW is a plain
    # byte blob needing an explicit architecture (see Loader.open), NONE
    # is the "no format" sentinel.  Custom ropper Enum parses this string.
    _enum_ = 'ELF PE MACH_O RAW NONE'
class DataContainer(object):
    """Lightweight record type: every keyword argument becomes an attribute.

    Assignment goes through the superclass ``__setattr__`` so subclasses
    that restrict attribute setting can still be constructed.
    """

    def __init__(self, **attributes):
        assign = super(DataContainer, self).__setattr__
        for name, value in attributes.items():
            assign(name, value)
class Section(object):
    """One section of a loaded binary: name, content bytes, virtual
    address and file offset (plus the optional parsed header struct)."""

    def __init__(self, name, sectionbytes, virtualAddress, offset, struct=None):
        # Section names coming straight from file headers may be bytes;
        # normalise them to str.
        self.name = name.decode('ascii') if type(name) == bytes else name
        self.bytes = sectionbytes
        self.virtualAddress = virtualAddress
        self.offset = offset
        self.struct = struct

    @property
    def size(self):
        """Section length in bytes."""
        return len(self.bytes)
class Loader(Abstract):
    """Abstract base for binary loaders (ELF, PE, Mach-O, Raw).

    Concrete subclasses parse one file format; ``Loader.open`` picks the
    first subclass that recognises a given file.
    """

    def __init__(self, filename, bytes=None, arch=None):
        super(Loader, self).__init__()
        self._fileName = filename
        self._bytes = None
        self._bytes_p = None
        self._arch = arch
        self._gadgets = {}
        self._checksum = 0x0
        self._printer = None
        # Optional user override for the image base (None = use the file's).
        self._manualImageBase = None
        self.loaded = False
        # _loadFile is provided by the concrete subclass.
        self.__binary = self._loadFile(filename, bytes)
        self.__calculateChecksum()
        if arch is None:
            self._arch = self._loadDefaultArch()

    @property
    def checksum(self):
        """SHA-256 hex digest of the raw file bytes."""
        return self._checksum

    @property
    def _binary(self):
        # Parsed binary object returned by the subclass's _loadFile.
        return self.__binary

    @abstractproperty
    def entryPoint(self):
        """Entry point address of the binary."""
        return None

    @property
    def arch(self):
        """Target architecture (set manually or detected by the subclass)."""
        return self._arch

    @arch.setter
    def arch(self, arch):
        self._arch = arch

    @abstractproperty
    def type(self):
        """File format (a member of Type)."""
        return None

    @abstractproperty
    def executableSections(self):
        """Iterable of executable Section objects."""
        return None

    @abstractproperty
    def dataSections(self):
        """Iterable of data Section objects."""
        return None

    @abstractmethod
    def _getImageBase():
        pass

    @abstractmethod
    def getSection(self, name):
        pass

    @abstractmethod
    def _loadDefaultArch(self):
        pass

    @abstractmethod
    def setNX(self, enable):
        pass

    @abstractmethod
    def setASLR(self, enable):
        pass

    @abstractmethod
    def checksec(self):
        pass

    @property
    def originalImageBase(self):
        """Image base as stored in the file, ignoring any manual override."""
        return self._getImageBase()

    @property
    def imageBase(self):
        # The manual override set via the imageBase setter wins over the
        # value parsed from the file.
        if self._manualImageBase == None:
            return self._getImageBase()
        return self._manualImageBase

    @imageBase.setter
    def imageBase(self, imageBase):
        self._manualImageBase = imageBase

    @property
    def fileName(self):
        return self._fileName

    def __calculateChecksum(self):
        # Hash of the whole raw file; used to identify cached results.
        m = sha256()
        m.update(self._binary._bytes)
        self._checksum = m.hexdigest()

    @classmethod
    def isSupportedFile(cls, fileName, bytes=None):
        """Return True if this loader can parse the given file."""
        return False

    @classmethod
    def open(cls, fileName, bytes=None, raw=False, arch=None):
        """Instantiate the first Loader subclass supporting fileName.

        Falls back to the Raw loader (which requires an explicit arch)
        when raw is True or no subclass recognises the file.
        """
        sc = Loader.__subclasses__()
        Raw = None
        for subclass in sc:
            if subclass.__name__ != 'Raw':
                if not raw and subclass.isSupportedFile(fileName, bytes):
                    if arch:
                        return subclass(fileName, bytes, arch=arch)
                    else:
                        return subclass(fileName, bytes)
            else:
                # Remember the Raw loader as last resort.
                Raw = subclass

        if Raw:
            if not arch:
                raise ArgumentError('Architecture has to be set, if raw file should be loaded')
            return Raw(fileName, bytes=bytes, arch=arch)
        else:
            raise LoaderError('Not supported file type')

    @property
    def loaded(self):
        return self._loaded

    @loaded.setter
    def loaded(self, isloaded):
        self._loaded = isloaded

    @property
    def printer(self):
        return self._printer

    @printer.setter
    def printer(self, new_printer):
        self._printer = new_printer

    @property
    def gadgets(self):
        return self._gadgets

    @gadgets.setter
    def gadgets(self, new_gadgets):
        self._gadgets = new_gadgets

    def _loadFile(self, fileName, bytes=None):
        # Overridden by subclasses to parse the file contents.
        pass

    def assertFileRange(self, value):
        # NOTE: plain assert, so this check disappears under python -O.
        assert value >= self._bytes_p.value and value <= (
            self._bytes_p.value + len(self._bytes)), 'Pointer not in file range'

    def _searchString(self, sections, string=None, length=0):
        """Search sections for a string; default pattern matches any run
        of two or more printable ASCII characters."""
        toReturn = []
        if not string or string == '[ -~]{2}[ -~]*':
            string = '[ -~]{2}[ -~]*'
        else:
            # Translate the user filter into a regular expression.
            string = self.arch.searcher.prepareFilter(string)
        string = string.encode('ascii')  # python 3 compatibility
        for section in sections:
            b = bytes(bytearray(section.bytes))
            for match in re.finditer(string, b):
                if length > 0:
                    # Only keep matches of at least the requested length.
                    if len(match.group()) >= length:
                        toReturn.append((self.imageBase + section.offset + match.start(), match.group()))
                else:
                    toReturn.append((self.imageBase + section.offset + match.start(), match.group()))
        return toReturn

    def searchDataString(self, string=None, length=0):
        """Search only the data sections (see _searchString)."""
        return self._searchString(list(self.dataSections), string, length)

    def searchString(self, string=None, length=0, sectionName=None):
        """Search data and executable sections, optionally restricted to
        the section called sectionName."""
        sections = list(self.dataSections)
        sections.extend(self.executableSections)
        if sectionName != None:
            for section in sections:
                if section.name == sectionName:
                    return self._searchString([section], string, length)
        else:
            return self._searchString(sections, string, length)

    def save(self, fileName=None):
        """Write the (possibly modified) binary back to fileName, or to
        the original file when no name is given."""
        if not fileName:
            fileName = self.fileName
        try:
            with open(fileName, 'wb') as f:
                f.write(self._binary._bytes)
        except BaseException as e:
            raise LoaderError(e)
# def calculateImageBase(self, section):
# ib = self.imageBase
# if self.manualImagebase == None:
# return ib
# return self.manualImagebase
| 28.49635 | 105 | 0.632813 |
from ropper.common.abstract import *
from ctypes import *
from ropper.common.enum import Enum
from struct import pack_into
from ropper.common.error import *
from ropper.arch import *
from hashlib import sha256
import re
class Type(Enum):
    # Supported binary file formats; RAW/NONE cover unrecognised input.
    _enum_ = 'ELF PE MACH_O RAW NONE'
class DataContainer(object):
    """Generic record object: keyword arguments become attributes.

    Assignment is routed through the parent class ``__setattr__`` so a
    subclass may restrict attribute setting without breaking construction.
    """

    def __init__(self, **args):
        setattr = super(DataContainer, self).__setattr__
        for key, value in args.items():
            setattr(key, value)
class Section(object):
    """A binary section: name, raw bytes, virtual address, file offset."""

    def __init__(self, name, sectionbytes, virtualAddress, offset, struct=None):
        # Names read from file headers may arrive as bytes; store as str.
        if type(name) == bytes:
            name = name.decode('ascii')
        self.name = name
        self.bytes = sectionbytes
        self.virtualAddress = virtualAddress
        self.offset = offset
        self.struct = struct  # optional parsed header struct

    @property
    def size(self):
        """Length of the section's byte content."""
        return len(self.bytes)
class Loader(Abstract):
    """Base class of the format-specific binary loaders.

    Subclasses implement the abstract members for one file format;
    ``Loader.open`` selects the matching subclass for a file.
    """

    def __init__(self, filename, bytes=None, arch=None):
        super(Loader, self).__init__()
        self._fileName = filename
        self._bytes = None
        self._bytes_p = None
        self._arch = arch
        self._gadgets = {}
        self._checksum = 0x0
        self._printer = None
        self._manualImageBase = None  # user-set image base override
        self.loaded = False
        # Delegated to the subclass: parse file bytes into a binary object.
        self.__binary = self._loadFile(filename, bytes)
        self.__calculateChecksum()
        if arch is None:
            self._arch = self._loadDefaultArch()

    @property
    def checksum(self):
        """SHA-256 hex digest of the file's raw bytes."""
        return self._checksum

    @property
    def _binary(self):
        return self.__binary

    @abstractproperty
    def entryPoint(self):
        """Binary entry point address (subclass responsibility)."""
        return None

    @property
    def arch(self):
        return self._arch

    @arch.setter
    def arch(self, arch):
        self._arch = arch

    @abstractproperty
    def type(self):
        """File format identifier (a Type member)."""
        return None

    @abstractproperty
    def executableSections(self):
        return None

    @abstractproperty
    def dataSections(self):
        return None

    @abstractmethod
    def _getImageBase():
        pass

    @abstractmethod
    def getSection(self, name):
        pass

    @abstractmethod
    def _loadDefaultArch(self):
        pass

    @abstractmethod
    def setNX(self, enable):
        pass

    @abstractmethod
    def setASLR(self, enable):
        pass

    @abstractmethod
    def checksec(self):
        pass

    @property
    def originalImageBase(self):
        """Image base from the file, ignoring any manual override."""
        return self._getImageBase()

    @property
    def imageBase(self):
        # Prefer the manual override when one was set.
        if self._manualImageBase == None:
            return self._getImageBase()
        return self._manualImageBase

    @imageBase.setter
    def imageBase(self, imageBase):
        self._manualImageBase = imageBase

    @property
    def fileName(self):
        return self._fileName

    def __calculateChecksum(self):
        # Hash the complete raw file once at load time.
        m = sha256()
        m.update(self._binary._bytes)
        self._checksum = m.hexdigest()

    @classmethod
    def isSupportedFile(cls, fileName, bytes=None):
        """Overridden by subclasses; the base class supports nothing."""
        return False

    @classmethod
    def open(cls, fileName, bytes=None, raw=False, arch=None):
        """Return an instance of the first subclass supporting fileName;
        fall back to Raw (explicit arch required) when none matches."""
        sc = Loader.__subclasses__()
        Raw = None
        for subclass in sc:
            if subclass.__name__ != 'Raw':
                if not raw and subclass.isSupportedFile(fileName, bytes):
                    if arch:
                        return subclass(fileName, bytes, arch=arch)
                    else:
                        return subclass(fileName, bytes)
            else:
                Raw = subclass

        if Raw:
            if not arch:
                raise ArgumentError('Architecture has to be set, if raw file should be loaded')
            return Raw(fileName, bytes=bytes, arch=arch)
        else:
            raise LoaderError('Not supported file type')

    @property
    def loaded(self):
        return self._loaded

    @loaded.setter
    def loaded(self, isloaded):
        self._loaded = isloaded

    @property
    def printer(self):
        return self._printer

    @printer.setter
    def printer(self, new_printer):
        self._printer = new_printer

    @property
    def gadgets(self):
        return self._gadgets

    @gadgets.setter
    def gadgets(self, new_gadgets):
        self._gadgets = new_gadgets

    def _loadFile(self, fileName, bytes=None):
        # Subclass hook: parse the file contents.
        pass

    def assertFileRange(self, value):
        # NOTE: assert-based check - disabled under python -O.
        assert value >= self._bytes_p.value and value <= (
            self._bytes_p.value + len(self._bytes)), 'Pointer not in file range'

    def _searchString(self, sections, string=None, length=0):
        """Regex-search sections; the default pattern matches runs of two
        or more printable ASCII characters."""
        toReturn = []
        if not string or string == '[ -~]{2}[ -~]*':
            string = '[ -~]{2}[ -~]*'
        else:
            string = self.arch.searcher.prepareFilter(string)
        string = string.encode('ascii')  # patterns must be bytes for bytes data
        for section in sections:
            b = bytes(bytearray(section.bytes))
            for match in re.finditer(string, b):
                if length > 0:
                    if len(match.group()) >= length:
                        toReturn.append((self.imageBase + section.offset + match.start(), match.group()))
                else:
                    toReturn.append((self.imageBase + section.offset + match.start(), match.group()))
        return toReturn

    def searchDataString(self, string=None, length=0):
        """Search the data sections only."""
        return self._searchString(list(self.dataSections), string, length)

    def searchString(self, string=None, length=0, sectionName=None):
        """Search data and executable sections, optionally limited to
        one named section."""
        sections = list(self.dataSections)
        sections.extend(self.executableSections)
        if sectionName != None:
            for section in sections:
                if section.name == sectionName:
                    return self._searchString([section], string, length)
        else:
            return self._searchString(sections, string, length)

    def save(self, fileName=None):
        """Write the binary back to disk (original file by default)."""
        if not fileName:
            fileName = self.fileName
        try:
            with open(fileName, 'wb') as f:
                f.write(self._binary._bytes)
        except BaseException as e:
            raise LoaderError(e)
| true | true |
1c4737df2efb759c6d135e0d72ca30a3e78a147a | 4,871 | py | Python | _unittests/ut_notebook/test_dynamic_cs.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | 4 | 2018-06-07T06:34:32.000Z | 2020-02-12T17:39:58.000Z | _unittests/ut_notebook/test_dynamic_cs.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | 13 | 2018-05-21T23:06:58.000Z | 2018-12-30T17:57:11.000Z | _unittests/ut_notebook/test_dynamic_cs.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | null | null | null | """
@brief test log(time=2s)
"""
import sys
import os
import unittest
from sklearn import datasets
import pandas
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.csharpyml.notebook.csmlmagics import CsMLMagics
class TestDynamicCS(ExtTestCase):
    """Test dynamic compilation."""

    # C# source compiled at test time; ReturnMLClass is the entry point
    # requested through the mlnet magic below.
    _script = """
    public class IrisObservation
    {
        [Column("0")]
        [ColumnName("Label")]
        public string Label;

        [Column("1")]
        public float Sepal_length;

        [Column("2")]
        public float Sepal_width;

        [Column("3")]
        public float Petal_length;

        [Column("4")]
        public float Petal_width;
    }

    public class IrisPrediction
    {
        public uint PredictedLabel;

        [VectorType(4)]
        public float[] Score;
    }

    public class TrainTestIris
    {
        string _dataset;
        PredictionFunction<IrisObservation, IrisPrediction> _fct;

        public TrainTestIris(string iris)
        {
            _dataset = iris;
        }

        public void Train()
        {
            using (var env = new ConsoleEnvironment(verbose:false))
            {
                var args = new TextLoader.Arguments()
                {
                    Separator = ",",
                    HasHeader = true,
                    Column = new TextLoader.Column[] {
                        TextLoader.Column.Parse("Label:U4[0-2]:0"),
                        new TextLoader.Column("Sepal_length", DataKind.R4, 1),
                        new TextLoader.Column("Sepal_width", DataKind.R4, 2),
                        new TextLoader.Column("Petal_length", DataKind.R4, 3),
                        new TextLoader.Column("Petal_width", DataKind.R4, 4),
                    }
                };

                var reader = new TextLoader(env, args);
                var concat = new ColumnConcatenatingEstimator(env,
                    "Features", "Sepal_length",
                    "Sepal_width", "Petal_length", "Petal_width");
                var km = new MulticlassLogisticRegression(env, "Label", "Features");
                var pipeline = concat.Append(km);

                IDataView trainingDataView = reader.Read(new MultiFileSource(_dataset));
                var model = pipeline.Fit(trainingDataView);

                var obs = new IrisObservation()
                {
                    Sepal_length = 3.3f,
                    Sepal_width = 1.6f,
                    Petal_length = 0.2f,
                    Petal_width = 5.1f,
                };

                _fct = model.MakePredictionFunction<IrisObservation, IrisPrediction>(env);
            }
        }

        public IrisPrediction Predict(double sl, double sw, double pl, double pw)
        {
            var obs = new IrisObservation()
            {
                Sepal_length = (float)sl,
                Sepal_width = (float)sw,
                Petal_length = (float)pl,
                Petal_width = (float)pw,
            };
            return _fct.Predict(obs);
        }
    }

    public static TrainTestIris ReturnMLClass(string ds)
    {
        return new TrainTestIris(ds);
    }
    """

    def test_src(self):
        "skip pylint"
        # Reference the src import so linters do not flag it as unused.
        self.assertFalse(src is None)

    def test_magic_cs(self):
        """Compile _script, train on the iris data and check one prediction."""
        cm = CsMLMagics()
        fct = cm.mlnet("ReturnMLClass", TestDynamicCS._script)
        if fct is None:
            # Include the script in the failure for easier debugging.
            raise Exception(TestDynamicCS._script)
        temp = get_temp_folder(__file__, "temp_nb_mlnet")
        iris = datasets.load_iris()
        X = iris.data
        y = iris.target
        # Header names differ from the C# field names; the TextLoader maps
        # by column index, not by header, so this is fine.
        features = ['Slength', 'Swidth', 'Plength', 'Pwidth']
        df = pandas.DataFrame(X, columns=features)
        df["Label"] = y
        df = df[["Label"] + ['Slength', 'Swidth', 'Plength', 'Pwidth']]
        dest = os.path.join(temp, "iris_data_id.txt")
        df.to_csv(dest, sep=',', index=False)
        cl = fct(dest)
        cl.Train()
        res = cl.Predict(3.4, 5.4, 3.2, 5.6)
        label = res.PredictedLabel
        score = list(res.Score)
        self.assertEqual(label, 3)
        self.assertEqual(len(score), 3)


if __name__ == "__main__":
    unittest.main()
| 30.829114 | 112 | 0.485116 | import sys
import os
import unittest
from sklearn import datasets
import pandas
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.csharpyml.notebook.csmlmagics import CsMLMagics
class TestDynamicCS(ExtTestCase):
_script = """
public class IrisObservation
{
[Column("0")]
[ColumnName("Label")]
public string Label;
[Column("1")]
public float Sepal_length;
[Column("2")]
public float Sepal_width;
[Column("3")]
public float Petal_length;
[Column("4")]
public float Petal_width;
}
public class IrisPrediction
{
public uint PredictedLabel;
[VectorType(4)]
public float[] Score;
}
public class TrainTestIris
{
string _dataset;
PredictionFunction<IrisObservation, IrisPrediction> _fct;
public TrainTestIris(string iris)
{
_dataset = iris;
}
public void Train()
{
using (var env = new ConsoleEnvironment(verbose:false))
{
var args = new TextLoader.Arguments()
{
Separator = ",",
HasHeader = true,
Column = new TextLoader.Column[] {
TextLoader.Column.Parse("Label:U4[0-2]:0"),
new TextLoader.Column("Sepal_length", DataKind.R4, 1),
new TextLoader.Column("Sepal_width", DataKind.R4, 2),
new TextLoader.Column("Petal_length", DataKind.R4, 3),
new TextLoader.Column("Petal_width", DataKind.R4, 4),
}
};
var reader = new TextLoader(env, args);
var concat = new ColumnConcatenatingEstimator(env,
"Features", "Sepal_length",
"Sepal_width", "Petal_length", "Petal_width");
var km = new MulticlassLogisticRegression(env, "Label", "Features");
var pipeline = concat.Append(km);
IDataView trainingDataView = reader.Read(new MultiFileSource(_dataset));
var model = pipeline.Fit(trainingDataView);
var obs = new IrisObservation()
{
Sepal_length = 3.3f,
Sepal_width = 1.6f,
Petal_length = 0.2f,
Petal_width = 5.1f,
};
_fct = model.MakePredictionFunction<IrisObservation, IrisPrediction>(env);
}
}
public IrisPrediction Predict(double sl, double sw, double pl, double pw)
{
var obs = new IrisObservation()
{
Sepal_length = (float)sl,
Sepal_width = (float)sw,
Petal_length = (float)pl,
Petal_width = (float)pw,
};
return _fct.Predict(obs);
}
}
public static TrainTestIris ReturnMLClass(string ds)
{
return new TrainTestIris(ds);
}
"""
    def test_src(self):
        """Sanity check that the local ``src`` package was importable
        (directly or via the fallback ``sys.path`` manipulation above)."""
        self.assertFalse(src is None)
    def test_magic_cs(self):
        """End-to-end check of the ``mlnet`` magic: compile the embedded C#
        snippet, train the ML.NET pipeline on the iris dataset, and run a
        single prediction through it."""
        cm = CsMLMagics()
        # Compile the C# source and retrieve the ReturnMLClass factory.
        fct = cm.mlnet("ReturnMLClass", TestDynamicCS._script)
        if fct is None:
            raise Exception(TestDynamicCS._script)
        temp = get_temp_folder(__file__, "temp_nb_mlnet")
        # Dump iris to CSV in the column order the C# TextLoader expects:
        # label first, then the four features.
        iris = datasets.load_iris()
        X = iris.data
        y = iris.target
        features = ['Slength', 'Swidth', 'Plength', 'Pwidth']
        df = pandas.DataFrame(X, columns=features)
        df["Label"] = y
        df = df[["Label"] + ['Slength', 'Swidth', 'Plength', 'Pwidth']]
        dest = os.path.join(temp, "iris_data_id.txt")
        df.to_csv(dest, sep=',', index=False)
        # Instantiate the C# class, train it, and predict one observation.
        cl = fct(dest)
        cl.Train()
        res = cl.Predict(3.4, 5.4, 3.2, 5.6)
        label = res.PredictedLabel
        score = list(res.Score)
        # Three iris classes -> predicted label 3 (1-based) and 3 scores.
        self.assertEqual(label, 3)
        self.assertEqual(len(score), 3)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
1c47385da5e1df91f69b6c3c9a480257a81f7483 | 705 | py | Python | repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 21 | 2018-11-20T15:58:39.000Z | 2022-03-15T19:57:24.000Z | repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 732 | 2018-11-21T18:33:26.000Z | 2022-03-31T16:16:24.000Z | repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 85 | 2018-11-20T17:55:00.000Z | 2022-03-29T09:40:31.000Z | from leapp.actors import Actor
from leapp.libraries.actor import opensshprotocolcheck
from leapp.models import Report, OpenSshConfig
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class OpenSshProtocolCheck(Actor):
    """
    Protocol configuration option was removed.

    Check the value of Protocol in OpenSSH server config file
    and warn about its deprecation if it is set. This option was removed
    in RHEL 7.4, but it might still be hanging around.
    """

    # Actor identifier within the leapp workflow.
    name = 'open_ssh_protocol'
    # Message types this actor consumes and produces.
    consumes = (OpenSshConfig, )
    produces = (Report, )
    # Workflow phases in which this actor runs.
    tags = (ChecksPhaseTag, IPUWorkflowTag, )

    def process(self):
        """Delegate to the actor library, passing all consumed OpenSshConfig
        messages so it can report deprecated ``Protocol`` usage."""
        opensshprotocolcheck.process(self.consume(OpenSshConfig))
| 30.652174 | 72 | 0.741844 | from leapp.actors import Actor
from leapp.libraries.actor import opensshprotocolcheck
from leapp.models import Report, OpenSshConfig
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class OpenSshProtocolCheck(Actor):
    """Warn when the removed OpenSSH ``Protocol`` option is still set.

    The option was removed in RHEL 7.4 but may linger in sshd_config.
    """

    # Actor identifier within the leapp workflow.
    name = 'open_ssh_protocol'
    # Message types this actor consumes and produces.
    consumes = (OpenSshConfig, )
    produces = (Report, )
    # Workflow phases in which this actor runs.
    tags = (ChecksPhaseTag, IPUWorkflowTag, )

    def process(self):
        """Delegate to the actor library with all consumed OpenSshConfig
        messages."""
        opensshprotocolcheck.process(self.consume(OpenSshConfig))
| true | true |
1c473885e4c622750632dab97a746f613fabe1e6 | 4,575 | py | Python | CIM16/IEC61970/Informative/InfGMLSupport/GmlSymbol.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM16/IEC61970/Informative/InfGMLSupport/GmlSymbol.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM16/IEC61970/Informative/InfGMLSupport/GmlSymbol.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 1 | 2021-04-02T18:04:49.000Z | 2021-04-02T18:04:49.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlSymbol(IdentifiedObject):
    """Describes how a feature is to appear on a map or display.

    The symbol describes not just the shape that should appear but also such
    graphical properties as color and opacity.
    """

    def __init__(self, version='', level='', type='', GmlFeatureStyles=None, GmlBaseSymbol=None, *args, **kw_args):
        """Initialises a new 'GmlSymbol' instance.

        @param version: The version of the Symbol.
        @param level: The level (of the map) where the symbol exists or the zoom levels at which this diagram object is displayed. As a way of de-cluttering displays, for example, some symbols and annotations are only shown when zoomed in.
        @param type: The Symbol type.
        @param GmlFeatureStyles:
        @param GmlBaseSymbol:
        """
        #: The version of the Symbol.
        self.version = version

        #: The level (of the map) where the symbol exists or the zoom levels at which this diagram object is displayed. As a way of de-cluttering displays, for example, some symbols and annotations are only shown when zoomed in.
        self.level = level

        #: The Symbol type.
        self.type = type

        self._GmlFeatureStyles = []
        self.GmlFeatureStyles = [] if GmlFeatureStyles is None else GmlFeatureStyles

        self._GmlBaseSymbol = None
        self.GmlBaseSymbol = GmlBaseSymbol

        super(GmlSymbol, self).__init__(*args, **kw_args)

    _attrs = ["version", "level", "type"]
    _attr_types = {"version": str, "level": str, "type": str}
    _defaults = {"version": '', "level": '', "type": ''}
    _enums = {}
    _refs = ["GmlFeatureStyles", "GmlBaseSymbol"]
    _many_refs = ["GmlFeatureStyles"]

    def getGmlFeatureStyles(self):
        """Return the feature styles this symbol is attached to."""
        return self._GmlFeatureStyles

    def setGmlFeatureStyles(self, value):
        """Replace the many-to-many GmlFeatureStyles association, keeping
        both sides consistent."""
        for p in self._GmlFeatureStyles:
            filtered = [q for q in p.GmlSymbols if q != self]
            # Fix: detach this symbol from the style object itself. The
            # original generated code assigned to
            # ``self._GmlFeatureStyles._GmlSymbols`` -- an attribute on the
            # *list* -- which raises AttributeError whenever the setter is
            # called with existing styles attached.
            p._GmlSymbols = filtered
        for r in value:
            if self not in r._GmlSymbols:
                r._GmlSymbols.append(self)
        self._GmlFeatureStyles = value

    GmlFeatureStyles = property(getGmlFeatureStyles, setGmlFeatureStyles)

    def addGmlFeatureStyles(self, *GmlFeatureStyles):
        """Attach this symbol to each given style (updates both sides)."""
        for obj in GmlFeatureStyles:
            if self not in obj._GmlSymbols:
                obj._GmlSymbols.append(self)
            self._GmlFeatureStyles.append(obj)

    def removeGmlFeatureStyles(self, *GmlFeatureStyles):
        """Detach this symbol from each given style (updates both sides)."""
        for obj in GmlFeatureStyles:
            if self in obj._GmlSymbols:
                obj._GmlSymbols.remove(self)
            self._GmlFeatureStyles.remove(obj)

    def getGmlBaseSymbol(self):
        """Return the base symbol this symbol refers to, if any."""
        return self._GmlBaseSymbol

    def setGmlBaseSymbol(self, value):
        """Re-point the GmlBaseSymbol association, keeping both sides
        consistent."""
        if self._GmlBaseSymbol is not None:
            filtered = [x for x in self.GmlBaseSymbol.GmlSymbols if x != self]
            self._GmlBaseSymbol._GmlSymbols = filtered
        self._GmlBaseSymbol = value
        if self._GmlBaseSymbol is not None:
            if self not in self._GmlBaseSymbol._GmlSymbols:
                self._GmlBaseSymbol._GmlSymbols.append(self)

    GmlBaseSymbol = property(getGmlBaseSymbol, setGmlBaseSymbol)
| 44.417476 | 353 | 0.695738 |
from CIM16.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlSymbol(IdentifiedObject):
def __init__(self, version='', level='', type='', GmlFeatureStyles=None, GmlBaseSymbol=None, *args, **kw_args):
self.version = version
self.level = level
self.type = type
self._GmlFeatureStyles = []
self.GmlFeatureStyles = [] if GmlFeatureStyles is None else GmlFeatureStyles
self._GmlBaseSymbol = None
self.GmlBaseSymbol = GmlBaseSymbol
super(GmlSymbol, self).__init__(*args, **kw_args)
_attrs = ["version", "level", "type"]
_attr_types = {"version": str, "level": str, "type": str}
_defaults = {"version": '', "level": '', "type": ''}
_enums = {}
_refs = ["GmlFeatureStyles", "GmlBaseSymbol"]
_many_refs = ["GmlFeatureStyles"]
def getGmlFeatureStyles(self):
return self._GmlFeatureStyles
def setGmlFeatureStyles(self, value):
for p in self._GmlFeatureStyles:
filtered = [q for q in p.GmlSymbols if q != self]
self._GmlFeatureStyles._GmlSymbols = filtered
for r in value:
if self not in r._GmlSymbols:
r._GmlSymbols.append(self)
self._GmlFeatureStyles = value
GmlFeatureStyles = property(getGmlFeatureStyles, setGmlFeatureStyles)
def addGmlFeatureStyles(self, *GmlFeatureStyles):
for obj in GmlFeatureStyles:
if self not in obj._GmlSymbols:
obj._GmlSymbols.append(self)
self._GmlFeatureStyles.append(obj)
def removeGmlFeatureStyles(self, *GmlFeatureStyles):
for obj in GmlFeatureStyles:
if self in obj._GmlSymbols:
obj._GmlSymbols.remove(self)
self._GmlFeatureStyles.remove(obj)
def getGmlBaseSymbol(self):
return self._GmlBaseSymbol
def setGmlBaseSymbol(self, value):
if self._GmlBaseSymbol is not None:
filtered = [x for x in self.GmlBaseSymbol.GmlSymbols if x != self]
self._GmlBaseSymbol._GmlSymbols = filtered
self._GmlBaseSymbol = value
if self._GmlBaseSymbol is not None:
if self not in self._GmlBaseSymbol._GmlSymbols:
self._GmlBaseSymbol._GmlSymbols.append(self)
GmlBaseSymbol = property(getGmlBaseSymbol, setGmlBaseSymbol)
| true | true |
1c4739397755e22e44590763ca56a2172d9a5609 | 1,896 | py | Python | jina/executors/crafters/__init__.py | robertjrodger/jina | 3bf8c1578f4f1a39b1c154705a535c52e1490141 | [
"Apache-2.0"
] | null | null | null | jina/executors/crafters/__init__.py | robertjrodger/jina | 3bf8c1578f4f1a39b1c154705a535c52e1490141 | [
"Apache-2.0"
] | 2 | 2021-02-15T01:40:38.000Z | 2021-02-15T02:00:21.000Z | jina/executors/crafters/__init__.py | robertjrodger/jina | 3bf8c1578f4f1a39b1c154705a535c52e1490141 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import inspect
from typing import Dict
from .. import BaseExecutor
from ...helper import typename
class BaseCrafter(BaseExecutor):
    """
    A :class:`BaseCrafter` transforms the content of `Document`.
    It can be used for preprocessing, segmenting etc.

    It is an interface for Crafters, a family of executors intended to apply
    transformations to single documents.
    The apply function is :func:`craft`; the names of its arguments are used
    as keys of the content.

    :param args: Additional positional arguments which are just used for the parent initialization
    :param kwargs: Additional keyword arguments which are just used for the parent initialization
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        def _content_keys(func):
            # Argument names of ``func``, minus the implicit ``self``.
            return [name for name in inspect.getfullargspec(func).args if name != 'self']

        self.required_keys = _content_keys(self.craft)
        if not self.required_keys:
            # ``craft`` may be wrapped (e.g. by a decorator); retry on the
            # unwrapped callable.
            self.required_keys = _content_keys(inspect.unwrap(self.craft))
        if not self.required_keys:
            self.logger.warning(
                f'{typename(self)} works on keys, but no keys are specified'
            )

    def craft(self, *args, **kwargs) -> Dict:
        """
        Apply function of this executor.

        The names of the arguments are used as keys, which tell the
        :class:`Driver` what information to extract from the protobuf request.
        Argument names should always be valid keys defined in the protobuf.

        :param args: Extra variable length arguments
        :param kwargs: Extra variable keyword arguments
        """
        raise NotImplementedError
| 37.92 | 123 | 0.6577 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import inspect
from typing import Dict
from .. import BaseExecutor
from ...helper import typename
class BaseCrafter(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.required_keys = [
k for k in inspect.getfullargspec(self.craft).args if k != 'self'
]
if not self.required_keys:
self.required_keys = [
k
for k in inspect.getfullargspec(inspect.unwrap(self.craft)).args
if k != 'self'
]
if not self.required_keys:
self.logger.warning(
f'{typename(self)} works on keys, but no keys are specified'
)
def craft(self, *args, **kwargs) -> Dict:
raise NotImplementedError
| true | true |
1c473c05bff8aec499bf1749a85df208a69118a7 | 11,036 | py | Python | detect_board.py | yashpatel5400/ARia | 1f9ad25f943f5b8859a80470715be8698863b2f8 | [
"MIT"
] | null | null | null | detect_board.py | yashpatel5400/ARia | 1f9ad25f943f5b8859a80470715be8698863b2f8 | [
"MIT"
] | null | null | null | detect_board.py | yashpatel5400/ARia | 1f9ad25f943f5b8859a80470715be8698863b2f8 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
def rectify(h):
    """Order four 2-D points as top-left, top-right, bottom-right, bottom-left.

    :param h: array holding exactly eight values (four x,y points).
    :return: 4x2 float array of the ordered corners, or ``None`` when the
        input does not contain exactly four points.
    """
    if h.shape[0] * h.shape[1] != 8:
        return None
    pts = h.reshape((4, 2))
    coord_sums = pts.sum(1)
    coord_diffs = np.diff(pts, axis=1)
    ordered = np.zeros((4, 2))
    # Smallest x+y -> top-left; largest x+y -> bottom-right.
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]
    # Smallest y-x -> top-right; largest y-x -> bottom-left.
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]
    return ordered
def get_corners(frame):
    """Find the four orange corner markers of the keyboard.

    :param frame: BGR image of the keyboard.
    :return: 4x2 array of marker centers ordered TL, TR, BR, BL (via
        ``rectify``), or ``None`` when fewer than four usable markers are
        detected.
    """
    imcopy = frame.copy()
    # NOTE(review): ``hsv`` is computed but never used -- the inRange call
    # below masks the BGR copy, so the bounds are effectively BGR values.
    # Confirm which color space was intended.
    hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)
    # Bounds for the orange marker color.
    lower_orange = np.array([0,100,100])
    upper_orange = np.array([50,255,255])
    # Keep only the pixels falling inside the orange range.
    mask = cv2.inRange(imcopy, lower_orange, upper_orange)
    imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)
    # Binarize and collapse to one channel so findContours can consume it.
    ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)
    thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)
    # Outer contours of the orange regions.
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Despite the name, these are the raw contours, not convex hulls.
    convex_hulls = np.array(contours[:])
    # Rank contours by area and keep the four largest -- assumed to be the
    # corner markers.
    contour_areas = [cv2.contourArea(c) for c in convex_hulls]
    largest_contour_idxes = np.array(contour_areas).argsort()[-4:][::-1]
    largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]
    # TODO: Ensure the contours have a minimum area.
    # Centroid of each marker from its image moments (skip degenerate ones).
    moments = [cv2.moments(c) for c in largest_convex_hulls]
    centers = [(int(m['m10']/m['m00']), int(m['m01']/m['m00'])) for m in moments if m['m00'] != 0]
    centers = np.array(centers)
    if centers.shape == (0,):
        return None
    # Order the centers TL, TR, BR, BL; rectify returns None unless there
    # are exactly four of them.
    centers = rectify(centers)
    return centers
def get_C_key(frame,corners):
    """Locate the C key from its blue marker sticker.

    :param frame: BGR image of the keyboard.
    :param corners: 4x2 array of board corners ordered TL, TR, BR, BL
        (as returned by ``get_corners``).
    :return: ``(ckey, width, top, [board_left_x, board_right_x])`` where
        ``ckey`` is the key's corner quadrilateral, ``width`` the white-key
        width, and ``top`` the y coordinate later used as the bottom edge of
        the black keys; ``None`` when no usable marker is found.
    """
    imcopy = frame.copy()
    # NOTE(review): ``hsv`` is computed but never used -- the inRange call
    # below masks the BGR copy, so the bounds are effectively BGR values
    # (blue-dominant). Confirm which color space was intended.
    hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)
    # Bounds for the blue marker color.
    lower_blue = np.array([150,0,0])
    upper_blue = np.array([255,100,100])
    # Keep only the pixels falling inside the blue range.
    mask = cv2.inRange(imcopy, lower_blue, upper_blue)
    imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)
    # Binarize and collapse to one channel so findContours can consume it.
    ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)
    thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)
    # Outer contours of the blue regions.
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Despite the name, these are the raw contours, not convex hulls.
    convex_hulls = np.array(contours)
    # Keep the single largest contour -- assumed to be the C-key marker.
    contour_areas = [cv2.contourArea(c) for c in convex_hulls]
    largest_contour_idxes = np.array(contour_areas).argsort()[-1:][::-1]
    largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]
    # TODO: Ensure the contour has a minimum area.
    # Approximate the marker outline with a quadrangle.
    if len(largest_convex_hulls) == 0:
        return None
    peri = cv2.arcLength(largest_convex_hulls[0],True)
    approx = cv2.approxPolyDP(largest_convex_hulls[0],0.02*peri,True)
    # NOTE(review): approxPolyDP typically returns shape (N, 1, 2); the size
    # check in ``rectify`` (shape[0]*shape[1] == 8) then only passes for
    # N == 8, not for a 4-point quadrangle -- verify this is intended.
    approx = rectify(approx)
    if approx is None:
        return None
    # Midpoints of the four board edges, from the orange corner markers.
    left_mdpt = [(corners[0,0]+corners[3,0])/2,(corners[0,1]+corners[3,1])/2]
    right_mdpt = [(corners[1,0]+corners[2,0])/2,(corners[1,1]+corners[2,1])/2]
    top_mdpt = [(corners[0,0]+corners[1,0])/2,(corners[0,1]+corners[1,1])/2]
    bot_mdpt = [(corners[2,0]+corners[3,0])/2,(corners[2,1]+corners[3,1])/2]
    # Bounding coordinates of the keyboard on the image.
    board_left_x = left_mdpt[0]
    board_right_x = right_mdpt[0]
    board_top_y = top_mdpt[1]
    board_bot_y = bot_mdpt[1]
    # Top edge of the marker box -> later used as the bottom of the black keys.
    top = (approx[0,1]+approx[1,1])/2
    # Width of the marker box = width of one white key
    # (black keys will be 2/3 as wide as a white key).
    left_mdpt = [(approx[0,0]+approx[3,0])/2,(approx[0,1]+approx[3,1])/2]
    right_mdpt = [(approx[1,0]+approx[2,0])/2,(approx[1,1]+approx[2,1])/2]
    left_x = left_mdpt[0]
    right_x = right_mdpt[0]
    width = right_x - left_x
    # Corner quadrilateral of the C key, spanning the board's full height.
    ckey = [[left_x,board_top_y],[right_x,board_top_y],[right_x,board_bot_y],[left_x,board_bot_y]]
    return(ckey,width,top,[board_left_x,board_right_x])
def remainder_black_keys(remainder, higher):
    """Number of black keys implied by ``remainder`` leftover white keys.

    A full octave has 7 white and 5 black keys; a partial octave at the end
    of the board contributes a direction-dependent number of black keys.

    :param remainder: leftover white-key count (1..6); other values map to 0.
    :param higher: True when the partial octave extends upward (to the
        right) from the reference key, False when it extends downward.
    :return: black-key count for the partial octave.
    """
    if higher:
        lookup = {1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 5}
    else:
        lookup = {1: 0, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4}
    return lookup.get(remainder, 0)
def _shift_key(key, dx):
    """Return a copy of a key quadrilateral translated right by ``dx`` pixels."""
    return [[corner[0] + dx, corner[1]] for corner in key]


def get_all_keys(frame, corners):
    """Locate every key on the keyboard given the board's corner markers.

    The C key is detected from its blue marker; every other key position is
    extrapolated from it using the C key's width and standard keyboard
    geometry.

    :param frame: BGR image of the keyboard.
    :param corners: 4x2 array of board corners ordered TL, TR, BR, BL.
    :return: dict mapping a left-to-right key index to the key's corner
        quadrilateral ``[[TL], [TR], [BR], [BL]]``; empty dict when the C
        key cannot be found.
    """
    c_key_output = get_C_key(frame, corners)
    if c_key_output is None:
        return {}
    ckey, key_width, black_bot, board_bounds = c_key_output

    # White keys fitting between the C key and the board's right edge.
    num_white = np.around((board_bounds[1] - ckey[1][0]) / key_width, decimals=0)
    # Black keys contributed by the partial octave at the right end.
    higher_remainder = remainder_black_keys(num_white % 7, True)
    # Black keys contributed by complete octaves (5 per octave).
    num_black = (num_white // 7) * 5

    # White keys: each is the C key translated right by whole key widths.
    all_keys = [(ckey[0][0] + shift * key_width,
                 [[ckey[0][0] + shift * key_width, ckey[0][1]],
                  [ckey[1][0] + shift * key_width, ckey[1][1]],
                  [ckey[2][0] + shift * key_width, ckey[2][1]],
                  [ckey[3][0] + shift * key_width, ckey[3][1]]])
                for shift in np.arange(num_white)]

    # First black key (C#): inset by thirds of a key width and ending at the
    # bottom edge of the blue marker (black_bot).
    first_black = [[ckey[0][0] + 2 * key_width / 3, ckey[0][1]],
                   [ckey[1][0] + key_width / 3, ckey[1][1]],
                   [ckey[2][0] + 2 * key_width / 3, black_bot],
                   [ckey[3][0] + key_width / 3, black_bot]]
    black_keys = [(first_black[0][0], first_black)]

    # Horizontal gaps (in white-key widths) between successive black keys.
    first_octave_gaps = [1, 2, 1, 1]   # D#, F#, G#, A# after the first C#
    octave_gaps = [2, 1, 2, 1, 1]      # C#, D#, F#, G#, A# of each next octave

    def _extend(gaps):
        # Append one black key per gap, each shifted from the previous one.
        for gap in gaps:
            prev = black_keys[-1][1]
            nxt = _shift_key(prev, gap * key_width)
            black_keys.append((nxt[0][0], nxt))

    _extend(first_octave_gaps)
    for _ in range(int(num_black / 5 - 1)):
        _extend(octave_gaps)
    # Partial octave at the far right of the board.
    _extend(octave_gaps[:int(higher_remainder)])

    # Merge, sort left-to-right by x coordinate, and index the keys.
    all_keys.extend(black_keys)
    all_keys.sort()
    key_boxes = [box for _, box in all_keys]
    return dict(zip(range(len(key_boxes)), key_boxes))
def get_board(frame):
    """Detect the keyboard in ``frame`` and return its key layout.

    :param frame: BGR image of the keyboard.
    :return: dict of key index -> corner quadrilateral; empty dict when the
        board's corner markers cannot be found.
    """
    board_corners = get_corners(frame)
    if board_corners is None:
        return {}
    return get_all_keys(frame, board_corners)
| 41.961977 | 233 | 0.631026 | import numpy as np
import cv2
def rectify(h):
    """Order four 2-D points as top-left, top-right, bottom-right, bottom-left.

    Returns ``None`` unless the array holds exactly eight values (4 points).
    """
    if h.shape[0] * h.shape[1] != 8:
        return None
    quad = h.reshape((4, 2))
    ordered = np.zeros((4, 2))
    corner_sum = quad.sum(1)
    corner_diff = np.diff(quad, axis=1)
    # min(x+y) -> TL, max(x+y) -> BR, min(y-x) -> TR, max(y-x) -> BL.
    ordered[0] = quad[np.argmin(corner_sum)]
    ordered[1] = quad[np.argmin(corner_diff)]
    ordered[2] = quad[np.argmax(corner_sum)]
    ordered[3] = quad[np.argmax(corner_diff)]
    return ordered
def get_corners(frame):
    """Find the four orange corner markers of the keyboard.

    :param frame: BGR image of the keyboard.
    :return: 4x2 array of marker centers ordered TL, TR, BR, BL, or ``None``
        when fewer than four usable markers are detected.
    """
    imcopy = frame.copy()
    # NOTE(review): ``hsv`` is computed but never used -- inRange masks the
    # BGR copy, so the bounds are effectively BGR values. Confirm intent.
    hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)
    # Bounds for the orange marker color.
    lower_orange = np.array([0,100,100])
    upper_orange = np.array([50,255,255])
    # Mask to the orange range, then binarize to one channel for contours.
    mask = cv2.inRange(imcopy, lower_orange, upper_orange)
    imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)
    ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)
    thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Despite the name, these are the raw contours, not convex hulls.
    convex_hulls = np.array(contours[:])
    # Keep the four largest contours -- assumed to be the corner markers.
    contour_areas = [cv2.contourArea(c) for c in convex_hulls]
    largest_contour_idxes = np.array(contour_areas).argsort()[-4:][::-1]
    largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]
    # Centroids from image moments (skip degenerate contours).
    moments = [cv2.moments(c) for c in largest_convex_hulls]
    centers = [(int(m['m10']/m['m00']), int(m['m01']/m['m00'])) for m in moments if m['m00'] != 0]
    centers = np.array(centers)
    if centers.shape == (0,):
        return None
    # Order TL, TR, BR, BL; rectify yields None unless exactly 4 centers.
    centers = rectify(centers)
    return centers
def get_C_key(frame,corners):
    """Locate the C key from its blue marker sticker.

    :param frame: BGR image of the keyboard.
    :param corners: 4x2 array of board corners ordered TL, TR, BR, BL.
    :return: ``(ckey, width, top, [board_left_x, board_right_x])`` where
        ``ckey`` is the key's corner quadrilateral, ``width`` the white-key
        width, and ``top`` the y coordinate later used as the bottom edge of
        the black keys; ``None`` when no usable marker is found.
    """
    imcopy = frame.copy()
    # NOTE(review): ``hsv`` is computed but never used -- inRange masks the
    # BGR copy, so the bounds are effectively BGR values. Confirm intent.
    hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)
    # Bounds for the blue marker color.
    lower_blue = np.array([150,0,0])
    upper_blue = np.array([255,100,100])
    # Mask to the blue range, then binarize to one channel for contours.
    mask = cv2.inRange(imcopy, lower_blue, upper_blue)
    imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)
    ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)
    thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Despite the name, these are the raw contours, not convex hulls.
    convex_hulls = np.array(contours)
    # Keep only the largest contour -- assumed to be the C-key marker.
    contour_areas = [cv2.contourArea(c) for c in convex_hulls]
    largest_contour_idxes = np.array(contour_areas).argsort()[-1:][::-1]
    largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]
    if len(largest_convex_hulls) == 0:
        return None
    # Approximate the marker outline with a polygon.
    peri = cv2.arcLength(largest_convex_hulls[0],True)
    approx = cv2.approxPolyDP(largest_convex_hulls[0],0.02*peri,True)
    # NOTE(review): approxPolyDP typically returns shape (N, 1, 2); the
    # size check in ``rectify`` only passes for N == 8 -- verify intent.
    approx = rectify(approx)
    if approx is None:
        return None
    # Midpoints of the four board edges, from the orange corner markers.
    left_mdpt = [(corners[0,0]+corners[3,0])/2,(corners[0,1]+corners[3,1])/2]
    right_mdpt = [(corners[1,0]+corners[2,0])/2,(corners[1,1]+corners[2,1])/2]
    top_mdpt = [(corners[0,0]+corners[1,0])/2,(corners[0,1]+corners[1,1])/2]
    bot_mdpt = [(corners[2,0]+corners[3,0])/2,(corners[2,1]+corners[3,1])/2]
    # Bounding coordinates of the keyboard on the image.
    board_left_x = left_mdpt[0]
    board_right_x = right_mdpt[0]
    board_top_y = top_mdpt[1]
    board_bot_y = bot_mdpt[1]
    # Top edge of the marker -> later the bottom of the black keys.
    top = (approx[0,1]+approx[1,1])/2
    # Width of the marker box = width of one white key.
    left_mdpt = [(approx[0,0]+approx[3,0])/2,(approx[0,1]+approx[3,1])/2]
    right_mdpt = [(approx[1,0]+approx[2,0])/2,(approx[1,1]+approx[2,1])/2]
    left_x = left_mdpt[0]
    right_x = right_mdpt[0]
    width = right_x - left_x
    # Corner quadrilateral of the C key spanning the board's full height.
    ckey = [[left_x,board_top_y],[right_x,board_top_y],[right_x,board_bot_y],[left_x,board_bot_y]]
    return(ckey,width,top,[board_left_x,board_right_x])
def remainder_black_keys(remainder, higher):
    """Black keys implied by ``remainder`` leftover white keys in a partial
    octave.

    :param remainder: leftover white-key count (1..6); other values map to 0.
    :param higher: True when the partial octave extends upward (rightward).
    :return: black-key count for the partial octave.
    """
    counts = ({1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 5}
              if higher else
              {1: 0, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4})
    return counts.get(remainder, 0)
def _shift_key(key, dx):
    """Return a copy of a key quadrilateral translated right by ``dx`` pixels."""
    return [[corner[0] + dx, corner[1]] for corner in key]


def get_all_keys(frame, corners):
    """Locate every key on the keyboard given the board's corner markers.

    The C key is detected from its blue marker; every other key position is
    extrapolated from it using the C key's width and standard keyboard
    geometry.

    :param frame: BGR image of the keyboard.
    :param corners: 4x2 array of board corners ordered TL, TR, BR, BL.
    :return: dict mapping a left-to-right key index to the key's corner
        quadrilateral ``[[TL], [TR], [BR], [BL]]``; empty dict when the C
        key cannot be found.
    """
    c_key_output = get_C_key(frame, corners)
    if c_key_output is None:
        return {}
    ckey, key_width, black_bot, board_bounds = c_key_output

    # White keys fitting between the C key and the board's right edge.
    num_white = np.around((board_bounds[1] - ckey[1][0]) / key_width, decimals=0)
    # Black keys contributed by the partial octave at the right end.
    higher_remainder = remainder_black_keys(num_white % 7, True)
    # Black keys contributed by complete octaves (5 per octave).
    num_black = (num_white // 7) * 5

    # White keys: each is the C key translated right by whole key widths.
    all_keys = [(ckey[0][0] + shift * key_width,
                 [[ckey[0][0] + shift * key_width, ckey[0][1]],
                  [ckey[1][0] + shift * key_width, ckey[1][1]],
                  [ckey[2][0] + shift * key_width, ckey[2][1]],
                  [ckey[3][0] + shift * key_width, ckey[3][1]]])
                for shift in np.arange(num_white)]

    # First black key (C#): inset by thirds of a key width and ending at the
    # bottom edge of the blue marker (black_bot).
    first_black = [[ckey[0][0] + 2 * key_width / 3, ckey[0][1]],
                   [ckey[1][0] + key_width / 3, ckey[1][1]],
                   [ckey[2][0] + 2 * key_width / 3, black_bot],
                   [ckey[3][0] + key_width / 3, black_bot]]
    black_keys = [(first_black[0][0], first_black)]

    # Horizontal gaps (in white-key widths) between successive black keys.
    first_octave_gaps = [1, 2, 1, 1]   # D#, F#, G#, A# after the first C#
    octave_gaps = [2, 1, 2, 1, 1]      # C#, D#, F#, G#, A# of each next octave

    def _extend(gaps):
        # Append one black key per gap, each shifted from the previous one.
        for gap in gaps:
            prev = black_keys[-1][1]
            nxt = _shift_key(prev, gap * key_width)
            black_keys.append((nxt[0][0], nxt))

    _extend(first_octave_gaps)
    for _ in range(int(num_black / 5 - 1)):
        _extend(octave_gaps)
    # Partial octave at the far right of the board.
    _extend(octave_gaps[:int(higher_remainder)])

    # Merge, sort left-to-right by x coordinate, and index the keys.
    all_keys.extend(black_keys)
    all_keys.sort()
    key_boxes = [box for _, box in all_keys]
    return dict(zip(range(len(key_boxes)), key_boxes))
def get_board(frame):
    """Detect the keyboard in ``frame`` and return its key layout.

    :param frame: BGR image of the keyboard.
    :return: dict of key index -> corner quadrilateral; empty dict when the
        board's corner markers cannot be found.
    """
    marker_corners = get_corners(frame)
    return {} if marker_corners is None else get_all_keys(frame, marker_corners)
| true | true |
1c473c12193e06e2a39525eaa752bdd46ae838b4 | 4,930 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations_async/_express_route_service_providers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations_async/_express_route_service_providers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations_async/_express_route_service_providers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations:
    """ExpressRouteServiceProvidersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    NOTE: this class is generated by Microsoft AutoRest; manual changes
    will be lost when the code is regenerated.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.ExpressRouteServiceProviderListResult"]:
        """Gets all the available express route service providers.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.ExpressRouteServiceProviderListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteServiceProviderListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"

        def prepare_request(next_link=None):
            # First page uses the templated operation URL; follow-up pages
            # use the service-provided continuation link verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is surfaced as
            # an ARM-formatted HttpResponseError.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}  # type: ignore
| 46.509434 | 135 | 0.669777 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations:
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.ExpressRouteServiceProviderListResult"]:
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}
| true | true |
1c473d201736fc5a4253bb86984a126ba886d2b1 | 463 | py | Python | services/core-api/tests/parties/party_appt/resources/test_mine_party_appt_type_resource.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 25 | 2018-07-09T19:04:37.000Z | 2022-03-15T17:27:10.000Z | services/core-api/tests/parties/party_appt/resources/test_mine_party_appt_type_resource.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 983 | 2018-04-25T20:08:07.000Z | 2022-03-31T21:45:20.000Z | services/core-api/tests/parties/party_appt/resources/test_mine_party_appt_type_resource.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 58 | 2018-05-15T22:35:50.000Z | 2021-11-29T19:40:52.000Z | import json
from app.api.parties.party_appt.models.mine_party_appt_type import MinePartyAppointmentType
# GET
def test_get_mine_party_appt_type(test_client, db_session, auth_headers):
    """GET /parties/mines/relationship-types returns every appointment type."""
    response = test_client.get(
        '/parties/mines/relationship-types',
        headers=auth_headers['full_auth_header'])
    payload = json.loads(response.data.decode())
    assert response.status_code == 200
    assert len(payload) == len(MinePartyAppointmentType.get_all())
| 35.615385 | 91 | 0.779698 | import json
from app.api.parties.party_appt.models.mine_party_appt_type import MinePartyAppointmentType
def test_get_mine_party_appt_type(test_client, db_session, auth_headers):
get_resp = test_client.get(
'/parties/mines/relationship-types', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200
assert len(get_data) == len(MinePartyAppointmentType.get_all())
| true | true |
1c473d39b2de3a119d3f5e5e4c9bb77889b782d6 | 923 | py | Python | setup.py | nolimitcarter/stock-mirror | 8bbc483af01f2d05d6f929d861a023e250500c8e | [
"RSA-MD"
] | 1 | 2021-02-27T18:59:42.000Z | 2021-02-27T18:59:42.000Z | setup.py | nolimitcarter/stock-mirror | 8bbc483af01f2d05d6f929d861a023e250500c8e | [
"RSA-MD"
] | null | null | null | setup.py | nolimitcarter/stock-mirror | 8bbc483af01f2d05d6f929d861a023e250500c8e | [
"RSA-MD"
] | 1 | 2021-02-21T04:21:40.000Z | 2021-02-21T04:21:40.000Z | #!/usr/bin/python
import os
import sys
from setuptools import setup, find_packages
# Must be run as root (or via sudo) because we invoke apt-get below.
if os.getuid() != 0:
    print('ERROR: Need to run as root')
    sys.exit(1)

# Install the system package if it is not already present.
print('INFO: Checking and installing requirements')
os.system('! dpkg -S python-imaging-tk && apt-get -y install python-imaging-tk')

# Collect pip requirements from requirements.txt.
# Fixes over the original: the file handle is now closed (context
# manager), blank lines no longer become empty '' entries in
# install_requires, and comment lines are recognized even when indented.
print('INFO: Generating the requirements from requirements.txt')
packages = []
with open('requirements.txt', 'r') as req_file:
    for line in req_file:
        requirement = line.strip()
        if requirement and not requirement.startswith('#'):
            packages.append(requirement)

# Run setuptools for pip
setup(
    name='stock-mirror',
    version='1.0',
    description='Raspberry powered mirror',
    author='nolimitcarter',
    url='https://github.com/nolimitcarter/stock-mirror',
    install_requires=packages,
    packages=find_packages(),
)
| 27.969697 | 80 | 0.712893 |
import os
import sys
from setuptools import setup, find_packages
if os.getuid() != 0:
print('ERROR: Need to run as root')
sys.exit(1)
print('INFO: Checking and installing requirements')
os.system('! dpkg -S python-imaging-tk && apt-get -y install python-imaging-tk')
print('INFO: Generating the requirements from requirements.txt')
packages = []
for line in open('requirements.txt', 'r'):
if not line.startswith('#'):
packages.append(line.strip())
setup(
name='stock-mirror',
version='1.0',
description='Raspberry powered mirror',
author='nolimitcarter',
url='https://github.com/nolimitcarter/stock-mirror',
install_requires=packages,
packages=find_packages(),
)
| true | true |
1c473d5db17232127b3c528ae791a934a3665187 | 6,500 | py | Python | scripts/export_seqqa.py | philiptzou/hiv-variation | 7ba3ffb1510b5d2b72387a0dead94d81095a1cbc | [
"MIT"
] | null | null | null | scripts/export_seqqa.py | philiptzou/hiv-variation | 7ba3ffb1510b5d2b72387a0dead94d81095a1cbc | [
"MIT"
] | 2 | 2020-02-19T21:49:11.000Z | 2021-04-27T16:48:30.000Z | scripts/export_seqqa.py | hivdb/hiv-variation | 7ba3ffb1510b5d2b72387a0dead94d81095a1cbc | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import csv
import json
from collections import defaultdict
import click
from hivdbql import app
# Convenience aliases into the hivdbql application package.
db = app.db
models = app.models
Isolate = models.Isolate
Host = models.Host
Species = models.Species
ClinicalIsolate = models.ClinicalIsolate
Subtype = models.Subtype
Sequence = models.Sequence
Reference = models.Reference

# Per-gene prevalence at or below which an amino acid counts as "unusual".
UNUSUAL_AAPCNT_THRESHOLD = {
    'PR': 0.01,
    'RT': 0.01,
    'IN': 0.01
}
GENES = ('PR', 'RT', 'IN')
DRUG_CLASSES = ('PI', 'NRTI', 'NNRTI', 'RTI', 'INSTI')
# Gene targeted by each drug class.
DRUG_CLASS_GENE_MAP = {
    'PI': 'PR',
    'NRTI': 'RT',
    'NNRTI': 'RT',
    'RTI': 'RT',
    'INSTI': 'IN'
}
MAJOR_SUBTYPES = ['A', 'B', 'C', 'CRF01_AE', 'CRF02_AG', 'D', 'F', 'G']
# Single-letter amino acids plus '_' (insertion), '-' (deletion), '*' (stop).
AMINO_ACIDS = 'ACDEFGHIKLMNPQRSTVWY_-*'
# UNUSUAL_CUTOFF = 0.0001  # 1 in 10,000 or 0.01%
# Column order of the output CSV (see run_seqqa's yielded dicts).
CSV_HEADER = [
    'IsolateID',
    'Gene',
    '# Unusuals',
    'Unusuals',
    '# APOBECs',
    'APOBECs'
]
# Number of isolates fetched per database round trip in iter_isolates.
QUERY_CHUNK_SIZE = 500
# Named SQLAlchemy filter conditions selectable from the command line.
CRITERIA_CHOICES = {
    'HIV1_ONLY': Isolate._species.has(Species.species == 'HIV1'),
    'HIV2_ONLY': Isolate._species.has(Species.species == 'HIV2'),
    'PLASMA_ONLY': Isolate.clinical_isolate.has(
        ClinicalIsolate.source == 'Plasma'),
    'NO_CLONES': Isolate.clinical_isolate.has(
        ClinicalIsolate.clone_method == 'None'),
    'NO_QA_ISSUES': ~Isolate._filter.has(),
    'GENBANK_ONLY': Isolate.sequences.any(
        Sequence.accession.isnot(None) &
        (Sequence.accession != '')
    ),
    'NO_PARTIAL_MUTS': Isolate.sequences.any(
        Sequence.sequence_type == 'PartialMutationList'
    ),
}
def build_consensus_lookup(aapcnt_data):
    """Build a (gene, position) -> (consensus_aa, percent) lookup.

    Only pooled records (subtype 'All', rx_type 'all') are considered;
    for each position the most prevalent amino acid wins.  Positions
    never seen default to (None, -1.0).
    """
    consensus = defaultdict(lambda: (None, -1.))
    for record in aapcnt_data:
        if record['subtype'] != 'All' or record['rx_type'] != 'all':
            continue
        locus = (record['gene'], record['position'])
        # Strictly-greater keeps the earlier record on ties, matching
        # max()'s first-argument preference.
        if record['percent'] > consensus[locus][1]:
            consensus[locus] = (record['aa'], record['percent'])
    return consensus
def unusual_mutation_lookup(aapcnt_data):
    """Map (gene, position, aa) -> percent for "unusual" mutations.

    A mutation is unusual when its pooled prevalence (subtype 'All',
    rx_type 'all') does not exceed the per-gene threshold in
    UNUSUAL_AAPCNT_THRESHOLD.  Stop codons ('*') are always unusual.
    Tail positions with unreliable coverage are excluded (RT > 240,
    IN > 270).

    NOTE(review): the original code compared the *percent* against
    240/270, which can never be true for a frequency, so those cutoffs
    were dead code; the position is compared here instead, matching the
    apparent intent of the "TODO: HIV2 only" note — confirm the cutoff
    values against the HIV-2 alignment coverage.
    """
    table = {}
    for aapcnt in aapcnt_data:
        # Only the pooled statistics define what is (un)usual.
        if aapcnt['subtype'] != 'All' or aapcnt['rx_type'] != 'all':
            continue
        gene = aapcnt['gene']
        pos = aapcnt['position']
        pcnt = aapcnt['percent']
        aa = aapcnt['aa']
        # Frequent amino acids are "usual"; stop codons never are.
        if aa != '*' and pcnt > UNUSUAL_AAPCNT_THRESHOLD[gene]:
            continue
        # TODO: HIV2 only — drop poorly covered tail positions.
        if gene == 'RT' and pos > 240:
            continue
        if gene == 'IN' and pos > 270:
            continue
        table[(gene, pos, aa)] = pcnt
    return table
def apobec_mutation_lookup(apobec_json):
    """Read an APOBEC-signature JSON file into a set of (gene, pos, aa)."""
    entries = json.load(apobec_json)
    return {(e['gene'], e['position'], e['aa']) for e in entries}
def iter_isolates(drugclass, criteria, is_hiv2):
    """Yield clinical human isolates of `drugclass`'s gene matching `criteria`.

    Results are fetched in chunks of QUERY_CHUNK_SIZE to bound memory use;
    progress is printed to stdout.

    :param drugclass: one of DRUG_CLASSES (selects the gene)
    :param criteria: keys into CRITERIA_CHOICES
    :param is_hiv2: restrict to HIV-2 when True, HIV-1 otherwise
    """
    print('Processing {} isolates...'
          .format(drugclass))
    gene = DRUG_CLASS_GENE_MAP[drugclass]
    # NOTE(review): when `criteria` is a tuple, += builds a new object;
    # if a caller passes a list (see export_seqqa's no_filter branch),
    # += extends it in place across calls.  Harmless here because the
    # duplicated condition is idempotent — confirm if criteria grow.
    if is_hiv2:
        criteria += ('HIV2_ONLY',)
    else:
        criteria += ('HIV1_ONLY',)
    conds = [CRITERIA_CHOICES[crkey] for crkey in criteria]
    query = (
        Isolate.query
        .filter(
            Isolate.gene == gene,
            Isolate.isolate_type == 'Clinical',
            Isolate._host.has(Host.host == 'Human'),
            *conds
        )
        # Eager-load sequences plus their insertions/mixtures so later
        # attribute access does not trigger per-isolate queries.
        .options(db.selectinload(Isolate.sequences)
                 .selectinload(Sequence.insertions))
        .options(db.selectinload(Isolate.sequences)
                 .selectinload(Sequence.mixtures))
    )
    if not is_hiv2:
        # for old HIV-2 isolate, there's no subtype table record
        query = query.filter(
            Isolate._subtype.has(Subtype.subtype.notin_(
                ['O', 'N', 'P', 'CPZ']
            ))
        )
    total = query.count()
    query = query.order_by(Isolate.id)
    # Page through the result set; ordering by id keeps pages stable.
    for offset in range(0, total, QUERY_CHUNK_SIZE):
        print(' {}/{} isolates...'.format(offset, total), end='\r')
        yield from query.limit(QUERY_CHUNK_SIZE).offset(offset)
    print(' {0} isolates... '.format(total))
def run_seqqa(drugclass, criteria, is_hiv2,
              cons_lookup, uum_lookup, apm_lookup):
    """Yield one QA row (dict keyed per CSV_HEADER) per matching isolate.

    :param cons_lookup: (gene, position) -> (consensus_aa, percent),
        from build_consensus_lookup
    :param uum_lookup: (gene, position, aa) -> percent of unusual
        mutations, from unusual_mutation_lookup
    :param apm_lookup: set of (gene, position, aa) APOBEC signature
        mutations, from apobec_mutation_lookup
    """
    for isolate in iter_isolates(drugclass, criteria, is_hiv2):
        gene = isolate.gene
        # this method returns consensus or single sequence
        sequence = isolate.get_or_create_consensus()
        unusuals = []
        apobecs = []
        for pos, aas in sequence.aas:
            cons = cons_lookup[(gene, pos)][0]
            # Any insertion marker collapses the whole mixture to '_';
            # this must happen BEFORE the length check below.
            if '_' in aas:
                aas = '_'
            # Skip highly ambiguous mixtures (more than 4 amino acids).
            if len(aas) > 4:
                continue
            for aa in aas:
                key = (gene, pos, aa)
                if key in uum_lookup:
                    pcnt = uum_lookup[key]
                    unusuals.append('{}{}{} ({:.2f}%)'
                                    .format(cons, pos, aa, pcnt * 100))
                if key in apm_lookup:
                    apobecs.append('{}{}{}'.format(cons, pos, aa))
        yield {
            'IsolateID': isolate.id,
            'Gene': gene,
            '# Unusuals': len(unusuals),
            'Unusuals': ', '.join(unusuals),
            '# APOBECs': len(apobecs),
            'APOBECs': ', '.join(apobecs),
        }
@click.command()
@click.option('--aapcnt-json', type=click.File('r'), required=True)
@click.option('--apobec-json', type=click.File('r'), required=True)
@click.option('--filter', type=click.Choice(CRITERIA_CHOICES.keys()),
              multiple=True, default=('NO_CLONES', 'NO_QA_ISSUES'),
              show_default=True, help='specify filter criteria')
@click.option('--no-filter', is_flag=True)
@click.option('--hiv2', is_flag=True, help='create table for HIV-2 sequences')
@click.argument('output_file', type=click.File('w'), default='-')
def export_seqqa(aapcnt_json, apobec_json, output_file,
                 filter, no_filter, hiv2):
    """Write a per-isolate sequence-QA CSV covering PI, RTI and INSTI.

    NOTE(review): the parameter name `filter` shadows the builtin; it is
    dictated by the --filter option name, so renaming would require an
    explicit click variable name.
    """
    result = []
    aapcnt_data = json.load(aapcnt_json)
    cons_lookup = build_consensus_lookup(aapcnt_data)
    uum_lookup = unusual_mutation_lookup(aapcnt_data)
    apm_lookup = apobec_mutation_lookup(apobec_json)
    if no_filter:
        filter = []
    # Database access requires the hivdbql application context.
    with app.app_context():
        for dc in ('PI', 'RTI', 'INSTI'):
            result.extend(
                run_seqqa(dc, filter, hiv2, cons_lookup,
                          uum_lookup, apm_lookup))
    writer = csv.DictWriter(output_file, CSV_HEADER)
    writer.writeheader()
    writer.writerows(result)


if __name__ == '__main__':
    export_seqqa()
| 31.553398 | 78 | 0.580308 |
import csv
import json
from collections import defaultdict
import click
from hivdbql import app
db = app.db
models = app.models
Isolate = models.Isolate
Host = models.Host
Species = models.Species
ClinicalIsolate = models.ClinicalIsolate
Subtype = models.Subtype
Sequence = models.Sequence
Reference = models.Reference
UNUSUAL_AAPCNT_THRESHOLD = {
'PR': 0.01,
'RT': 0.01,
'IN': 0.01
}
GENES = ('PR', 'RT', 'IN')
DRUG_CLASSES = ('PI', 'NRTI', 'NNRTI', 'RTI', 'INSTI')
DRUG_CLASS_GENE_MAP = {
'PI': 'PR',
'NRTI': 'RT',
'NNRTI': 'RT',
'RTI': 'RT',
'INSTI': 'IN'
}
MAJOR_SUBTYPES = ['A', 'B', 'C', 'CRF01_AE', 'CRF02_AG', 'D', 'F', 'G']
AMINO_ACIDS = 'ACDEFGHIKLMNPQRSTVWY_-*'
solateID',
'Gene',
'# Unusuals',
'Unusuals',
'# APOBECs',
'APOBECs'
]
QUERY_CHUNK_SIZE = 500
CRITERIA_CHOICES = {
'HIV1_ONLY': Isolate._species.has(Species.species == 'HIV1'),
'HIV2_ONLY': Isolate._species.has(Species.species == 'HIV2'),
'PLASMA_ONLY': Isolate.clinical_isolate.has(
ClinicalIsolate.source == 'Plasma'),
'NO_CLONES': Isolate.clinical_isolate.has(
ClinicalIsolate.clone_method == 'None'),
'NO_QA_ISSUES': ~Isolate._filter.has(),
'GENBANK_ONLY': Isolate.sequences.any(
Sequence.accession.isnot(None) &
(Sequence.accession != '')
),
'NO_PARTIAL_MUTS': Isolate.sequences.any(
Sequence.sequence_type == 'PartialMutationList'
),
}
def build_consensus_lookup(aapcnt_data):
table = defaultdict(lambda: (None, -1.))
for aapcnt in aapcnt_data:
if aapcnt['subtype'] != 'All' or aapcnt['rx_type'] != 'all':
continue
gene = aapcnt['gene']
pos = aapcnt['position']
table[(gene, pos)] = max(
table[(gene, pos)],
(aapcnt['aa'], aapcnt['percent']),
key=lambda o: o[1])
return table
def unusual_mutation_lookup(aapcnt_data):
table = {}
for aapcnt in aapcnt_data:
if aapcnt['subtype'] != 'All' or aapcnt['rx_type'] != 'all':
continue
gene = aapcnt['gene']
pcnt = aapcnt['percent']
aa = aapcnt['aa']
if aa != '*' and pcnt > UNUSUAL_AAPCNT_THRESHOLD[gene]:
continue
if gene == 'RT' and pcnt > 240:
continue
if gene == 'IN' and pcnt > 270:
continue
table[(gene, aapcnt['position'], aa)] = pcnt
return table
def apobec_mutation_lookup(apobec_json):
apobec_data = json.load(apobec_json)
table = set()
for apobec in apobec_data:
table.add((apobec['gene'], apobec['position'], apobec['aa']))
return table
def iter_isolates(drugclass, criteria, is_hiv2):
print('Processing {} isolates...'
.format(drugclass))
gene = DRUG_CLASS_GENE_MAP[drugclass]
if is_hiv2:
criteria += ('HIV2_ONLY',)
else:
criteria += ('HIV1_ONLY',)
conds = [CRITERIA_CHOICES[crkey] for crkey in criteria]
query = (
Isolate.query
.filter(
Isolate.gene == gene,
Isolate.isolate_type == 'Clinical',
Isolate._host.has(Host.host == 'Human'),
*conds
)
.options(db.selectinload(Isolate.sequences)
.selectinload(Sequence.insertions))
.options(db.selectinload(Isolate.sequences)
.selectinload(Sequence.mixtures))
)
if not is_hiv2:
query = query.filter(
Isolate._subtype.has(Subtype.subtype.notin_(
['O', 'N', 'P', 'CPZ']
))
)
total = query.count()
query = query.order_by(Isolate.id)
for offset in range(0, total, QUERY_CHUNK_SIZE):
print(' {}/{} isolates...'.format(offset, total), end='\r')
yield from query.limit(QUERY_CHUNK_SIZE).offset(offset)
print(' {0} isolates... '.format(total))
def run_seqqa(drugclass, criteria, is_hiv2,
cons_lookup, uum_lookup, apm_lookup):
for isolate in iter_isolates(drugclass, criteria, is_hiv2):
gene = isolate.gene
# this method returns consensus or single sequence
sequence = isolate.get_or_create_consensus()
unusuals = []
apobecs = []
for pos, aas in sequence.aas:
cons = cons_lookup[(gene, pos)][0]
if '_' in aas:
aas = '_'
if len(aas) > 4:
continue
for aa in aas:
key = (gene, pos, aa)
if key in uum_lookup:
pcnt = uum_lookup[key]
unusuals.append('{}{}{} ({:.2f}%)'
.format(cons, pos, aa, pcnt * 100))
if key in apm_lookup:
apobecs.append('{}{}{}'.format(cons, pos, aa))
yield {
'IsolateID': isolate.id,
'Gene': gene,
'
'Unusuals': ', '.join(unusuals),
'
'APOBECs': ', '.join(apobecs),
}
@click.command()
@click.option('--aapcnt-json', type=click.File('r'), required=True)
@click.option('--apobec-json', type=click.File('r'), required=True)
@click.option('--filter', type=click.Choice(CRITERIA_CHOICES.keys()),
multiple=True, default=('NO_CLONES', 'NO_QA_ISSUES'),
show_default=True, help='specify filter criteria')
@click.option('--no-filter', is_flag=True)
@click.option('--hiv2', is_flag=True, help='create table for HIV-2 sequences')
@click.argument('output_file', type=click.File('w'), default='-')
def export_seqqa(aapcnt_json, apobec_json, output_file,
filter, no_filter, hiv2):
result = []
aapcnt_data = json.load(aapcnt_json)
cons_lookup = build_consensus_lookup(aapcnt_data)
uum_lookup = unusual_mutation_lookup(aapcnt_data)
apm_lookup = apobec_mutation_lookup(apobec_json)
if no_filter:
filter = []
with app.app_context():
for dc in ('PI', 'RTI', 'INSTI'):
result.extend(
run_seqqa(dc, filter, hiv2, cons_lookup,
uum_lookup, apm_lookup))
writer = csv.DictWriter(output_file, CSV_HEADER)
writer.writeheader()
writer.writerows(result)
if __name__ == '__main__':
export_seqqa()
| true | true |
1c473df0f9c48b185376676de8e91c7a3df4ab2e | 7,762 | py | Python | raspberry_eye/pan_tilt/pan_tilt.py | GalBrandwine/Raspberry_Eye | dbfe2fd4f9a695dc9017007e88fa8a2a13dfbcaa | [
"Apache-2.0"
] | 1 | 2021-07-01T20:57:32.000Z | 2021-07-01T20:57:32.000Z | pan_tilt_tracking/pan_tilt/pan_tilt.py | GalBrandwine/Gimbal_Pi | e2fa465f17474c31c2cbe4be1959924224472e7e | [
"MIT"
] | null | null | null | pan_tilt_tracking/pan_tilt/pan_tilt.py | GalBrandwine/Gimbal_Pi | e2fa465f17474c31c2cbe4be1959924224472e7e | [
"MIT"
] | 3 | 2018-11-22T14:45:08.000Z | 2018-11-26T12:16:02.000Z | #!/usr/bin/env python
"""
================================================
ABElectronics Servo Pi pwm controller | PWM servo controller demo
run with: python demo_servomove.py
================================================
This demo shows how to set the limits of movement on a servo
and then move between those positions
mapping for my boars:
Library_channel | HAT_pwm_out
1 | 0
2 | 1
15 | 14
16 | 15 (not in use)
"""
import time
try:
from driver.ServoPi import Servo
except ImportError:
print("Failed to import ServoPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append("..")
from ServoPi import Servo
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
# class PanTilt:
# """A project-specific class for my pan tilt mechanizem (NOT AN OOP THING). """
#
# def __init__(self, yaw, roll, pitch, address=0x6f, ):
# # create an instance of the servo class on I2C address 0x40
# servo = Servo(address) # 0x40)
#
# yaw = yaw = 14
# roll = roll = 0
# pitch = pitch = 1
#
# # set the servo minimum and maximum limits in milliseconds
# # the limits for a servo are typically between 1ms and 2ms.
#
# # Yaw can turn 180 deg
# servo.set_low_limit(0.7, yaw + 1)
# servo.set_high_limit(2.4, yaw + 1)
#
# # roll can turn 90 deg (-45 to +45)
# servo.set_low_limit(1.0, roll + 1)
# servo.set_high_limit(2.0, roll + 1)
#
# # Pith can turn 90 deg (-45 to +45)
# servo.set_low_limit(1.0, pitch + 1)
# servo.set_high_limit(2.0, pitch + 1)
#
# def servo_enable(self, number, flag):
# # Enable the outputs
# servo.output_enable() if flag is True else servo.output_disable()
#
# def pan(self, angle):
# if angle < 0:
# move(yaw + 1, 90 - angle, 180)
# else:
# move(yaw + 1, angle, 180)
#
# def tilt(self, angle):
# if angle < 0:
# move(pitch + 1, 90 - angle, 180)
# else:
# move(pitch + 1, angle, 180)
# Module-level servo controller shared by pan(), tilt() and servo_enable().
# Create an instance of the servo class on I2C address 0x6F.
servo = Servo(0x6F)  # 0x40)

# PWM channel indices on the HAT; the driver's channels are 1-based,
# hence the `+ 1` wherever a channel is passed to it.
# (Fixed: the original used redundant chained assignments `yaw = yaw = 14`.)
yaw = 14
roll = 0
pitch = 1

# set the servo minimum and maximum limits in milliseconds
# the limits for a servo are typically between 1ms and 2ms.

# Yaw can turn 180 deg
servo.set_low_limit(0.7, yaw + 1)
servo.set_high_limit(2.4, yaw + 1)

# roll can turn 90 deg (-45 to +45)
servo.set_low_limit(1.0, roll + 1)
servo.set_high_limit(2.0, roll + 1)

# Pitch can turn 90 deg (-45 to +45)
servo.set_low_limit(1.0, pitch + 1)
servo.set_high_limit(2.0, pitch + 1)
def servo_enable(number, flag):
    """Enable (flag=True) or disable (flag=False) the servo outputs.

    On enable, yaw and pitch are centred (90 of 180 degrees) and we wait
    one second for the servos to settle.

    NOTE(review): `number` is unused — all channels are driven
    regardless; confirm whether per-channel enabling was intended.
    """
    # Enable / Disable the outputs
    if flag is True:
        servo.output_enable()
        servo.move(yaw + 1, 90, 180)
        servo.move(pitch + 1, 90, 180)
        time.sleep(1)
    else:
        servo.sleep()  # stop the timers of the PWM, so no ERRORS corrections on the servo...
        servo.output_disable()
#def pan(angle):
#print("panning: {}".format(angle))
#if angle < 0:
#pos = servo.get_position(yaw + 1, 180)
#if pos is not 0:
#print("yaw: {}, in pos: {}".format(yaw,pos))
#servo.move(yaw + 1, 90 + pos + angle, 180)
#else:
#servo.move(yaw + 1, 90+ angle, 180)
def pan(angle):
    """Turn the yaw servo to `angle` degrees off centre.

    Channel positions run 0-180 with 90 as straight ahead, so the offset
    angle is shifted by 90 before being sent to the driver.
    """
    servo.move(yaw + 1, angle + 90, 180)
def tilt(angle):
    """Turn the pitch servo to `angle` degrees off centre.

    Channel positions run 0-180 with 90 as level, so the offset angle is
    shifted by 90 before being sent to the driver.
    """
    servo.move(pitch + 1, angle + 90, 180)
def main():
    """Demo routine: sweep the yaw servo back and forth until Ctrl-C.

    Creates its own controller (shadowing the module-level one, as the
    original demo did), configures travel limits, then loops forever.
    """
    demo_servo = Servo(0x6F)
    yaw = 14
    roll = 0
    pitch = 1

    # Pulse-width travel limits in milliseconds (typical servos: 1-2 ms).
    demo_servo.set_low_limit(0.7, yaw + 1)    # yaw sweeps a full 180 deg
    demo_servo.set_high_limit(2.4, yaw + 1)
    demo_servo.set_low_limit(1.0, roll + 1)   # roll limited to -45..+45 deg
    demo_servo.set_high_limit(2.0, roll + 1)
    demo_servo.set_low_limit(1.0, pitch + 1)  # pitch limited to -45..+45 deg
    demo_servo.set_high_limit(2.0, pitch + 1)

    demo_servo.output_enable()

    try:
        print("servo pos: {}".format(demo_servo.get_position(yaw + 1, 180)))
        # Sweep pattern: centre -> right -> centre -> left -> centre, forever.
        sweeps = ((90, 180, 10), (180, 90, -10), (90, 0, -10), (0, 90, 10))
        while True:
            for start, stop, step in sweeps:
                for position in range(start, stop, step):
                    demo_servo.move(yaw + 1, position, 180)
                    print("servo pos: {}".format(
                        demo_servo.get_position(yaw + 1, 180)))
                    time.sleep(.5)
        print("moving")  # unreachable: the sweep loop never exits normally
    except KeyboardInterrupt:
        # Stop the PWM timers first so no further error corrections reach
        # the servos, leaving them relaxed.
        demo_servo.sleep()
        print("\noutput disabled\n")


if __name__ == "__main__":
    """For testing. """
    main()
| 32.751055 | 93 | 0.546251 |
import time
try:
from driver.ServoPi import Servo
except ImportError:
print("Failed to import ServoPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append("..")
from ServoPi import Servo
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
)
servo.set_high_limit(2.0, pitch + 1)
def servo_enable(number, flag):
if flag is True:
servo.output_enable()
servo.move(yaw + 1, 90, 180)
servo.move(pitch + 1, 90, 180)
time.sleep(1)
else:
servo.sleep()
servo.output_disable()
def pan(angle):
servo.move(yaw + 1, 90+angle, 180)
def tilt(angle):
servo.move(pitch + 1, 90+angle, 180)
def main():
servo = Servo(0x6F)
yaw = 14
roll = 0
pitch = 1
servo.set_low_limit(0.7, yaw + 1)
servo.set_high_limit(2.4, yaw + 1)
servo.set_low_limit(1.0, roll + 1)
servo.set_high_limit(2.0, roll + 1)
servo.set_low_limit(1.0, pitch + 1)
servo.set_high_limit(2.0, pitch + 1)
servo.output_enable()
try:
s: {}".format(servo.get_position(yaw + 1, 180)))
while True:
for i in range(90, 180, 10):
servo.move(yaw + 1, i, 180)
print("servo pos: {}".format(servo.get_position(yaw + 1, 180)))
time.sleep(.5)
for i in range(180, 90, -10):
servo.move(yaw + 1, i, 180)
print("servo pos: {}".format(servo.get_position(yaw + 1, 180)))
time.sleep(.5)
for i in range(90, 0, -10):
servo.move(yaw + 1, i, 180)
print("servo pos: {}".format(servo.get_position(yaw + 1, 180)))
time.sleep(.5)
for i in range(0, 90, 10):
servo.move(yaw + 1, i, 180)
print("servo pos: {}".format(servo.get_position(yaw + 1, 180)))
time.sleep(.5)
print("moving")
except KeyboardInterrupt as err:
servo.sleep()
print("\noutput disabled\n")
if __name__ == "__main__":
main()
| true | true |
1c473df1c5ab7f1a3f2a1362c0a5f273fa3b6093 | 1,252 | py | Python | pwd_python/int_search.py | miccaldas/pwd_python | 434a9453e554e1a0195fc78e43b0a2f9b0f7822f | [
"MIT"
] | null | null | null | pwd_python/int_search.py | miccaldas/pwd_python | 434a9453e554e1a0195fc78e43b0a2f9b0f7822f | [
"MIT"
] | null | null | null | pwd_python/int_search.py | miccaldas/pwd_python | 434a9453e554e1a0195fc78e43b0a2f9b0f7822f | [
"MIT"
] | null | null | null | from mysql.connector import connect, Error
from colr import color
import fire
def search_int():
try:
busca = input(color(" What is the id? ", fore="#fe7243"))
conn = connect(host="localhost", user="mic", password="xxxx", database="pwd")
cur = conn.cursor()
query = " SELECT pwdid, site, username, passwd, comment, time FROM pwd WHERE pwdid = " + busca
cur.execute(query)
records = cur.fetchall()
for row in records:
print(color(" [0] ID » ", fore="#3c828e"), color(str(row[0]), fore="#efb666"))
print(color(" [1] SITE » ", fore="#3c828e"), color(str(row[1]), fore="#efb666"))
print(color(" [2] USERNAME » ", fore="#3c828e"), color(str(row[2]), fore="#efb666"))
print(color(" [3] PASSWORD » ", fore="#3c828e"), color(str(row[3]), fore="#efb666"))
print(color(" [4] COMMENT » ", fore="#3c828e"), color(str(row[4]), fore="#efb666"))
print(color(" [5] TIME : ", fore="#3c828e"), color(str(row[5]), fore="#efb666"))
print("\n")
except Error as e:
print("Error while connecting to db", e)
finally:
if conn:
conn.close()
if __name__ == "__main__":
fire.Fire(search_int)
| 39.125 | 102 | 0.557508 | from mysql.connector import connect, Error
from colr import color
import fire
def search_int():
    """Prompt for a pwd-table id and pretty-print the matching record.

    Fixes over the previous version:
    - The id is bound as a query parameter instead of being concatenated
      into the SQL string, so user input cannot inject SQL.
    - ``conn`` is initialised to ``None`` so the ``finally`` clause cannot
      raise ``NameError`` when ``connect()`` itself fails.
    """
    conn = None  # ensure the finally clause is safe if connect() raises
    try:
        busca = input(color(" What is the id? ", fore="#fe7243"))
        conn = connect(host="localhost", user="mic", password="xxxx", database="pwd")
        cur = conn.cursor()
        # %s placeholder + parameter tuple: the driver escapes the value.
        query = "SELECT pwdid, site, username, passwd, comment, time FROM pwd WHERE pwdid = %s"
        cur.execute(query, (busca,))
        records = cur.fetchall()
        for row in records:
            print(color(" [0] ID » ", fore="#3c828e"), color(str(row[0]), fore="#efb666"))
            print(color(" [1] SITE » ", fore="#3c828e"), color(str(row[1]), fore="#efb666"))
            print(color(" [2] USERNAME » ", fore="#3c828e"), color(str(row[2]), fore="#efb666"))
            print(color(" [3] PASSWORD » ", fore="#3c828e"), color(str(row[3]), fore="#efb666"))
            print(color(" [4] COMMENT » ", fore="#3c828e"), color(str(row[4]), fore="#efb666"))
            print(color(" [5] TIME : ", fore="#3c828e"), color(str(row[5]), fore="#efb666"))
            print("\n")
    except Error as e:
        print("Error while connecting to db", e)
    finally:
        if conn:
            conn.close()
if __name__ == "__main__":
fire.Fire(search_int)
| true | true |
1c473f9ba97beb6712e1c41d567868d7db8681e0 | 7,444 | py | Python | sp_experiment/tests/test_utils.py | sappelhoff/sp_psychopy | 79cae80eb920b35fb27a52acfde0eda38b9124b1 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T14:05:31.000Z | 2022-03-11T14:05:31.000Z | sp_experiment/tests/test_utils.py | sappelhoff/sp_psychopy | 79cae80eb920b35fb27a52acfde0eda38b9124b1 | [
"BSD-3-Clause"
] | 8 | 2019-02-12T07:47:47.000Z | 2021-01-25T14:05:05.000Z | sp_experiment/tests/test_utils.py | sappelhoff/sp_psychopy | 79cae80eb920b35fb27a52acfde0eda38b9124b1 | [
"BSD-3-Clause"
] | 2 | 2019-02-19T17:10:43.000Z | 2022-03-11T14:05:32.000Z | """Testing the utility functions."""
import time
import os
import os.path as op
from tempfile import gettempdir
from shutil import rmtree, copyfile
from collections import OrderedDict
import pytest
import numpy as np
import pandas as pd
import sp_experiment
from sp_experiment.define_settings import (EXPECTED_FPS,
KEYLIST_SAMPLES
)
from sp_experiment.utils import (Fake_serial,
My_serial,
calc_bonus_payoff,
get_final_choice_outcomes,
get_payoff_dict_from_df,
get_passive_action,
get_passive_outcome,
get_jittered_waitframes,
log_data,
_get_payoff_setting,
)
from sp_experiment.define_payoff_settings import (get_payoff_settings,
get_payoff_dict
)
init_dir = op.dirname(sp_experiment.__file__)
data_dir = op.join(init_dir, 'experiment_data')
test_data_dir = op.join(init_dir, 'tests', 'data')
no_errors_file = op.join(test_data_dir, '2_trials_no_errors.tsv')
def test_serials():
    """Exercise Fake_serial echo behaviour and the My_serial wait wrapper."""
    payload = bytes([1])
    fake = Fake_serial()
    # The fake serial port simply echoes what it is asked to write.
    assert fake.write(payload) == payload
    # Wrapping the port must block for at least `delay` seconds per write
    # (this also covers the internal "mysleep" helper).
    delay = 1
    wrapped = My_serial(fake, delay)
    before = time.perf_counter()
    wrapped.write(payload)
    elapsed = time.perf_counter() - before
    assert elapsed >= delay
def test_calc_bonus_payoff():
    """Test bonus calculation for absent and present event files."""
    # Check for non-present data: with no event files on disk the function
    # reports which task is still missing instead of a money amount.
    bonus = calc_bonus_payoff(998)
    assert isinstance(bonus, list)
    assert len(bonus) == 4
    assert bonus[0] == 'did not yet complete task "A".'
    bonus = calc_bonus_payoff(999)
    assert bonus[1] == 'did not yet complete task "B".'
    # present data ... temporarily copy over a test file for both tasks
    tmp_fpath1 = op.join(data_dir, 'sub-998_task-spactive_events.tsv')
    tmp_fpath2 = op.join(data_dir, 'sub-998_task-sppassive_events.tsv')
    copyfile(no_errors_file, tmp_fpath1)
    copyfile(no_errors_file, tmp_fpath2)
    bonus = calc_bonus_payoff(998, exchange_rate=0.1)
    # remove tmp files
    os.remove(tmp_fpath1)
    os.remove(tmp_fpath2)
    # '4 Euros' is the known payoff of 2_trials_no_errors.tsv at rate 0.1
    # (presumably points * exchange_rate, rounded — confirm in calc_bonus_payoff).
    assert bonus[-1] == '4 Euros'
def test_get_final_choice_outcomes():
    """Final choice outcomes must match the values stored in the test file."""
    events = pd.read_csv(no_errors_file, sep='\t')
    observed = get_final_choice_outcomes(events)
    # The two trials in 2_trials_no_errors.tsv ended with outcomes 5 and 9.
    np.testing.assert_array_equal(observed, [5, 9])
def test_get_payoff_dict_from_df():
    """Payoff dicts read back from a data frame match the logged settings."""
    events = pd.read_csv(no_errors_file, sep='\t')
    # The trial argument is 0-indexed.
    first = get_payoff_dict_from_df(events, 0)
    assert isinstance(first, OrderedDict)
    # Check both distributions of the second trial in detail.
    second = get_payoff_dict_from_df(events, 1)
    for key, expected in ((0, {3, 9}), (1, {7, 8})):
        observed = set(second[key])
        assert len(observed) == len(expected)
        assert sorted(observed) == sorted(expected)
    # Only two trials were recorded, so asking for a third must raise.
    with pytest.raises(IndexError):
        get_payoff_dict_from_df(events, 2)
def test_get_passive_action():
    """Actions replayed in the passive condition match the active log."""
    events = pd.read_csv(no_errors_file, sep='\t')
    keys_rts = get_passive_action(events, 0, 0)
    # Expect a one-element list of (key, reaction_time) tuples.
    assert isinstance(keys_rts, list)
    assert len(keys_rts) == 1
    assert isinstance(keys_rts[0], tuple)
    key, rt = keys_rts[0]
    # First sample of trial 0 was a "sample left" press at ~0.227 s.
    assert key == KEYLIST_SAMPLES[0]
    np.testing.assert_allclose(rt, 0.227, rtol=0.01)
def test_get_passive_outcome():
    """Outcomes replayed in the passive condition match the active log."""
    events = pd.read_csv(no_errors_file, sep='\t')
    # Passing the "last sample" (-1) yields the trial's final choice outcome.
    assert get_passive_outcome(events, 0, -1) == get_final_choice_outcomes(events)[0]
    # Every individual sample is replayed with the value that was drawn.
    expected_per_sample = [3, 3, 3, 5, 5, 5, 4, 5, 3, 3, 3, 3]
    for sample_idx, expected in enumerate(expected_per_sample):
        assert get_passive_outcome(events, 0, sample_idx) == expected
def test_get_jittered_waitframes():
    """Jittered waits of 1000-2000 ms stay within [1x, 2x] EXPECTED_FPS frames."""
    for _ in range(100):
        n_frames = get_jittered_waitframes(1000, 2000)
        assert EXPECTED_FPS <= n_frames <= EXPECTED_FPS * 2
def test_log_data():
    """Sanity check the data logging: column layout and action encoding."""
    df = pd.read_csv(no_errors_file, sep='\t')
    # Check that action_types are as expected
    action_types = df['action_type'].dropna().unique().tolist()
    np.testing.assert_array_equal(action_types,
                                  ['sample', 'forced_stop', 'final_choice'])
    # Create a temporary logging file (hash of os.times() as a unique dirname)
    myhash = str(hash(os.times()))
    data_dir = op.join(gettempdir(), myhash)
    os.makedirs(data_dir)
    fname = 'tmp_data_file.tsv'
    fpath = op.join(data_dir, fname)
    # Log some data
    log_data(fpath)
    with open(fpath, 'r') as fin:
        for i, line in enumerate(fin.readlines()):
            # spot check some known data in the line
            assert line.strip().split('\t')[-2] == '0'
    # There should have been only one line
    assert i == 0
    # Log more data; action codes 5/2/3/7 should be written as
    # forced_stop/stop/final_choice/premature_stop — presumably the mapping
    # lives inside log_data; confirmed only via the assertions below.
    log_data(fpath, action=5)
    log_data(fpath, action=2)
    log_data(fpath, action=3)
    log_data(fpath, action=7)
    df = pd.read_csv(fpath, sep='\t', header=None)
    action_types = df[3].tolist()
    action_vals = df[4].tolist()
    assert len(action_types) == 5 and len(action_vals) == 5
    # The very first log_data call wrote no action at all.
    assert np.isnan(action_types[0]) and np.isnan(action_vals[0])
    assert action_types[1] == 'forced_stop' and action_vals[1] == 0
    assert action_types[2] == 'stop' and action_vals[2] == 2
    assert action_types[3] == 'final_choice' and action_vals[3] == 0
    assert action_types[4] == 'premature_stop' and action_vals[4] == 2
    # And even more data logging: a payoff_dict must be serialisable too
    payoff_settings = get_payoff_settings(0.1)
    setting = payoff_settings[0, :]
    payoff_dict = get_payoff_dict(setting)
    log_data(fpath, payoff_dict=payoff_dict)
    # Remove the temporary dir and all its contents
    rmtree(data_dir, ignore_errors=True)
@pytest.mark.parametrize('trial, expected_setting', (
    pytest.param(0, np.array((3, 98, 1, 0, 5, 4, 0.8, 0.2))),  # noqa: E501
    pytest.param(1, np.array((3, 9, 0.22, 0.78, 8, 7, 0.67, 0.33))),  # noqa: E501
))
def test_get_payoff_setting_aux(trial, expected_setting):
    """Test private func for getting payoff sets from df.

    Each expected setting is an 8-vector of two outcome/probability pairs
    per option — presumably (out1a, out1b, p1a, p1b, out2a, out2b, p2a, p2b);
    confirm the column order against _get_payoff_setting.
    """
    # Test experienced (i.e. probabilities as actually sampled, not nominal)
    df = pd.read_csv(no_errors_file, sep='\t')
    setting = _get_payoff_setting(df, trial, experienced=True)
    np.testing.assert_array_equal(setting.squeeze(), expected_setting)
| 34.623256 | 103 | 0.637157 | import time
import os
import os.path as op
from tempfile import gettempdir
from shutil import rmtree, copyfile
from collections import OrderedDict
import pytest
import numpy as np
import pandas as pd
import sp_experiment
from sp_experiment.define_settings import (EXPECTED_FPS,
KEYLIST_SAMPLES
)
from sp_experiment.utils import (Fake_serial,
My_serial,
calc_bonus_payoff,
get_final_choice_outcomes,
get_payoff_dict_from_df,
get_passive_action,
get_passive_outcome,
get_jittered_waitframes,
log_data,
_get_payoff_setting,
)
from sp_experiment.define_payoff_settings import (get_payoff_settings,
get_payoff_dict
)
init_dir = op.dirname(sp_experiment.__file__)
data_dir = op.join(init_dir, 'experiment_data')
test_data_dir = op.join(init_dir, 'tests', 'data')
no_errors_file = op.join(test_data_dir, '2_trials_no_errors.tsv')
def test_serials():
some_byte = bytes([1])
ser = Fake_serial()
assert ser.write(some_byte) == some_byte
waitsecs = 1
ser = My_serial(ser, waitsecs)
start = time.perf_counter()
ser.write(some_byte)
stop = time.perf_counter()
assert (stop - start) >= waitsecs
def test_calc_bonus_payoff():
bonus = calc_bonus_payoff(998)
assert isinstance(bonus, list)
assert len(bonus) == 4
assert bonus[0] == 'did not yet complete task "A".'
bonus = calc_bonus_payoff(999)
assert bonus[1] == 'did not yet complete task "B".'
tmp_fpath1 = op.join(data_dir, 'sub-998_task-spactive_events.tsv')
tmp_fpath2 = op.join(data_dir, 'sub-998_task-sppassive_events.tsv')
copyfile(no_errors_file, tmp_fpath1)
copyfile(no_errors_file, tmp_fpath2)
bonus = calc_bonus_payoff(998, exchange_rate=0.1)
os.remove(tmp_fpath1)
os.remove(tmp_fpath2)
assert bonus[-1] == '4 Euros'
def test_get_final_choice_outcomes():
df = pd.read_csv(no_errors_file, sep='\t')
outcomes = get_final_choice_outcomes(df)
expected_outcomes = [5, 9]
np.testing.assert_array_equal(outcomes, expected_outcomes)
def test_get_payoff_dict_from_df():
df = pd.read_csv(no_errors_file, sep='\t')
payoff_dict = get_payoff_dict_from_df(df, 0)
assert isinstance(payoff_dict, OrderedDict)
payoff_dict = get_payoff_dict_from_df(df, 1)
read_set = set(payoff_dict[0])
expected_set = set((3, 9))
assert len(read_set) == len(expected_set)
assert sorted(read_set) == sorted(expected_set)
read_set = set(payoff_dict[1])
expected_set = set((7, 8))
assert len(read_set) == len(expected_set)
assert sorted(read_set) == sorted(expected_set)
with pytest.raises(IndexError):
get_payoff_dict_from_df(df, 2)
def test_get_passive_action():
df = pd.read_csv(no_errors_file, sep='\t')
keys_rts = get_passive_action(df, 0, 0)
assert isinstance(keys_rts, list)
assert len(keys_rts) == 1
assert isinstance(keys_rts[0], tuple)
assert keys_rts[0][0] == KEYLIST_SAMPLES[0]
np.testing.assert_allclose(keys_rts[0][1], 0.227, rtol=0.01)
def test_get_passive_outcome():
df = pd.read_csv(no_errors_file, sep='\t')
outcome = get_passive_outcome(df, 0, -1)
outcomes = get_final_choice_outcomes(df)
assert outcome == outcomes[0]
expected_outcomes = [3, 3, 3, 5, 5, 5, 4, 5, 3, 3, 3, 3]
for sample, expected in zip(range(12), expected_outcomes):
out = get_passive_outcome(df, 0, sample)
assert out == expected
def test_get_jittered_waitframes():
n = 100
for _ in range(n):
wait_frames = get_jittered_waitframes(1000, 2000)
assert wait_frames >= EXPECTED_FPS and wait_frames <= EXPECTED_FPS*2
def test_log_data():
df = pd.read_csv(no_errors_file, sep='\t')
action_types = df['action_type'].dropna().unique().tolist()
np.testing.assert_array_equal(action_types,
['sample', 'forced_stop', 'final_choice'])
myhash = str(hash(os.times()))
data_dir = op.join(gettempdir(), myhash)
os.makedirs(data_dir)
fname = 'tmp_data_file.tsv'
fpath = op.join(data_dir, fname)
log_data(fpath)
with open(fpath, 'r') as fin:
for i, line in enumerate(fin.readlines()):
assert line.strip().split('\t')[-2] == '0'
assert i == 0
log_data(fpath, action=5)
log_data(fpath, action=2)
log_data(fpath, action=3)
log_data(fpath, action=7)
df = pd.read_csv(fpath, sep='\t', header=None)
action_types = df[3].tolist()
action_vals = df[4].tolist()
assert len(action_types) == 5 and len(action_vals) == 5
assert np.isnan(action_types[0]) and np.isnan(action_vals[0])
assert action_types[1] == 'forced_stop' and action_vals[1] == 0
assert action_types[2] == 'stop' and action_vals[2] == 2
assert action_types[3] == 'final_choice' and action_vals[3] == 0
assert action_types[4] == 'premature_stop' and action_vals[4] == 2
payoff_settings = get_payoff_settings(0.1)
setting = payoff_settings[0, :]
payoff_dict = get_payoff_dict(setting)
log_data(fpath, payoff_dict=payoff_dict)
rmtree(data_dir, ignore_errors=True)
@pytest.mark.parametrize('trial, expected_setting', (
pytest.param(0, np.array((3, 98, 1, 0, 5, 4, 0.8, 0.2))),
pytest.param(1, np.array((3, 9, 0.22, 0.78, 8, 7, 0.67, 0.33))),
))
def test_get_payoff_setting_aux(trial, expected_setting):
df = pd.read_csv(no_errors_file, sep='\t')
setting = _get_payoff_setting(df, trial, experienced=True)
np.testing.assert_array_equal(setting.squeeze(), expected_setting)
| true | true |
1c473f9c6965b22315dbb289eff8247c71bdc790 | 15,497 | py | Python | ldm/data/imagenet.py | samedii/latent-diffusion | f13bf9bf463d95b5a16aeadd2b02abde31f769f8 | [
"MIT"
] | 563 | 2021-12-21T02:26:38.000Z | 2022-03-31T05:54:51.000Z | ldm/data/imagenet.py | samedii/latent-diffusion | f13bf9bf463d95b5a16aeadd2b02abde31f769f8 | [
"MIT"
] | 23 | 2021-12-22T10:00:00.000Z | 2022-03-24T20:43:49.000Z | ldm/data/imagenet.py | samedii/latent-diffusion | f13bf9bf463d95b5a16aeadd2b02abde31f769f8 | [
"MIT"
] | 51 | 2021-12-21T02:27:04.000Z | 2022-03-23T12:30:31.000Z | import os, yaml, pickle, shutil, tarfile, glob
import cv2
import albumentations
import PIL
import numpy as np
import torchvision.transforms.functional as TF
from omegaconf import OmegaConf
from functools import partial
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset, Subset
import taming.data.utils as tdu
from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
from taming.data.imagenet import ImagePaths
from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
def synset2idx(path_to_yaml="data/index_synset.yaml"):
    """Return the inverse of the index->synset mapping stored in *path_to_yaml*.

    The YAML file maps integer class indices to synset ids; the returned dict
    maps synset id -> class index.
    """
    with open(path_to_yaml) as f:
        # safe_load: yaml.load without an explicit Loader is a TypeError in
        # PyYAML >= 6 and allowed arbitrary object construction before that.
        di2s = yaml.safe_load(f)
    return dict((v, k) for k, v in di2s.items())
class ImageNetBase(Dataset):
    """Base class for the ImageNet splits.

    Subclasses implement ``_prepare`` (setting ``self.root``, ``self.datadir``,
    ``self.txt_filelist`` and ``self.random_crop`` and extracting the data).
    This base class then downloads the synset/human-label lookup tables and
    builds ``self.data``: an ``ImagePaths`` dataset when ``process_images`` is
    True, otherwise a plain list of absolute file paths.
    """
    def __init__(self, config=None):
        # config may be a plain dict or an OmegaConf container; normalise to dict.
        self.config = config or OmegaConf.create()
        if not type(self.config)==dict:
            self.config = OmegaConf.to_container(self.config)
        self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
        self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._prepare_human_to_integer_label()
        self._load()
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        return self.data[i]
    def _prepare(self):
        # Subclass hook: must set paths and make the data available on disk.
        raise NotImplementedError()
    def _filter_relpaths(self, relpaths):
        """Drop broken files and, if configured, restrict to sub_indices classes."""
        # Known-broken file that is always removed from the file list.
        ignore = set([
            "n06596364_9591.JPEG",
        ])
        relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
        if "sub_indices" in self.config:
            indices = str_to_indices(self.config["sub_indices"])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
            self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
            files = []
            for rpath in relpaths:
                # Relative paths look like "<synset>/<filename>.JPEG".
                syn = rpath.split("/")[0]
                if syn in synsets:
                    files.append(rpath)
            return files
        else:
            return relpaths
    def _prepare_synset_to_human(self):
        """Download the synset -> human-readable-name table if missing/corrupt."""
        SIZE = 2655750  # expected file size in bytes; size mismatch triggers re-download
        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
        self.human_dict = os.path.join(self.root, "synset_human.txt")
        if (not os.path.exists(self.human_dict) or
                not os.path.getsize(self.human_dict)==SIZE):
            download(URL, self.human_dict)
    def _prepare_idx_to_synset(self):
        """Download the class-index -> synset yaml mapping if missing."""
        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)
    def _prepare_human_to_integer_label(self):
        """Download and parse the human-label -> integer-class-id table."""
        URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
        self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
        if (not os.path.exists(self.human2integer)):
            download(URL, self.human2integer)
        with open(self.human2integer, "r") as f:
            lines = f.read().splitlines()
            assert len(lines) == 1000
            self.human2integer_dict = dict()
            for line in lines:
                # Each line is "<integer>: <human label>".
                value, key = line.split(":")
                self.human2integer_dict[key] = int(value)
    def _load(self):
        """Read the file list, derive labels and build self.data."""
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
        # The synset is the directory component of each relative path.
        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
        unique_synsets = np.unique(self.synsets)
        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
        if not self.keep_orig_class_label:
            # Labels are re-indexed 0..K-1 over the (possibly filtered) synsets.
            self.class_labels = [class_dict[s] for s in self.synsets]
        else:
            # NOTE(review): self.synset2idx is only assigned inside
            # _filter_relpaths when "sub_indices" is in the config;
            # keep_orig_class_label without sub_indices would raise
            # AttributeError here — confirm callers always combine the two.
            self.class_labels = [self.synset2idx[s] for s in self.synsets]
        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)
        self.human_labels = [human_dict[s] for s in self.synsets]
        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }
        if self.process_images:
            self.size = retrieve(self.config, "size", default=256)
            self.data = ImagePaths(self.abspaths,
                                   labels=labels,
                                   size=self.size,
                                   random_crop=self.random_crop,
                                   )
        else:
            # Expose only file paths; downstream code handles image loading.
            self.data = self.abspaths
class ImageNetTrain(ImageNetBase):
    """ILSVRC2012 training split, fetched via academictorrents on first use."""
    NAME = "ILSVRC2012_train"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"  # academictorrents id
    FILES = [
        "ILSVRC2012_img_train.tar",
    ]
    SIZES = [
        147897477120,  # expected tar size in bytes; mismatch triggers re-download
    ]
    def __init__(self, process_images=True, data_root=None, **kwargs):
        # process_images=False exposes bare file paths instead of decoded images.
        self.process_images = process_images
        self.data_root = data_root
        super().__init__(**kwargs)
    def _prepare(self):
        """Download/extract the training data once and write filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # Default cache location, e.g. ~/.cache/autoencoders/data/<NAME>.
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 1281167
        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
                                    default=True)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))
            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path
                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall trusts member paths in the archive
                # (tar path traversal); tolerable only because the torrent hash
                # is pinned above — confirm this is acceptable.
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)
                print("Extracting sub-tars.")
                # The train tar contains one tar per synset; unpack each into
                # a folder named after the synset.
                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:-len(".tar")]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, "r:") as tar:
                        tar.extractall(path=subdir)
            # Persist a sorted list of relative JPEG paths for _load().
            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
class ImageNetValidation(ImageNetBase):
    """ILSVRC2012 validation split, reorganized into per-synset folders."""
    NAME = "ILSVRC2012_validation"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"  # academictorrents id
    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"  # filename -> synset table
    FILES = [
        "ILSVRC2012_img_val.tar",
        "validation_synset.txt",
    ]
    SIZES = [
        6744924160,  # expected tar size in bytes
        1950000,  # expected size of validation_synset.txt in bytes
    ]
    def __init__(self, process_images=True, data_root=None, **kwargs):
        # process_images=False exposes bare file paths instead of decoded images.
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)
    def _prepare(self):
        """Download/extract the validation data once and write filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # Default cache location, e.g. ~/.cache/autoencoders/data/<NAME>.
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 50000
        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
                                    default=False)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))
            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path
                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall trusts member paths in the archive;
                # tolerable only because the torrent hash is pinned above.
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)
                vspath = os.path.join(self.root, self.FILES[1])
                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
                    download(self.VS_URL, vspath)
                with open(vspath, "r") as f:
                    # Each line is "<image filename> <synset id>".
                    synset_dict = f.read().splitlines()
                    synset_dict = dict(line.split() for line in synset_dict)
                print("Reorganizing into synset folders")
                # Validation images arrive flat; move each into its synset dir
                # so the layout matches the training split.
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for k, v in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)
            # Persist a sorted list of relative JPEG paths for _load().
            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
class ImageNetSR(Dataset):
    """Super-resolution view over an ImageNet split: yields (HR, degraded LR) pairs."""
    def __init__(self, size=None,
                 degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
                 random_crop=True):
        """
        Imagenet Superresolution Dataloader
        Performs following ops in order:
        1.  crops a crop of size s from image either as random or center crop
        2.  resizes crop to size with cv2.area_interpolation
        3.  degrades resized crop with degradation_fn

        :param size: resizing to size after cropping
        :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
        :param downscale_f: Low Resolution Downsample factor
        :param min_crop_f: determines crop size s,
          where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
        :param max_crop_f: ""
        :param random_crop: center crop when False, random crop when True
        """
        self.base = self.get_base()
        assert size
        assert (size / downscale_f).is_integer()
        self.size = size
        self.LR_size = int(size / downscale_f)
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert(max_crop_f <= 1.)
        self.center_crop = not random_crop
        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
        self.pil_interpolation = False # gets reset later if incase interp_op is from pillow
        if degradation == "bsrgan":
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
        elif degradation == "bsrgan_light":
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
        else:
            # Plain interpolation-based degradation: pick the backend by name.
            interpolation_fn = {
            "cv_nearest": cv2.INTER_NEAREST,
            "cv_bilinear": cv2.INTER_LINEAR,
            "cv_bicubic": cv2.INTER_CUBIC,
            "cv_area": cv2.INTER_AREA,
            "cv_lanczos": cv2.INTER_LANCZOS4,
            "pil_nearest": PIL.Image.NEAREST,
            "pil_bilinear": PIL.Image.BILINEAR,
            "pil_bicubic": PIL.Image.BICUBIC,
            "pil_box": PIL.Image.BOX,
            "pil_hamming": PIL.Image.HAMMING,
            "pil_lanczos": PIL.Image.LANCZOS,
            }[degradation]
            self.pil_interpolation = degradation.startswith("pil_")
            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
                                                                          interpolation=interpolation_fn)
    def __len__(self):
        return len(self.base)
    def __getitem__(self, i):
        """Return a dict with "image" (HR) and "LR_image", both scaled to [-1, 1]."""
        example = self.base[i]
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        # Crop side length is a random fraction of the shorter image side.
        min_side_len = min(image.shape[:2])
        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
        crop_side_len = int(crop_side_len)
        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
        image = self.cropper(image=image)["image"]
        image = self.image_rescaler(image=image)["image"]
        if self.pil_interpolation:
            # PIL-based degradation needs a PIL image; convert back afterwards.
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)
        else:
            LR_image = self.degradation_process(image=image)["image"]
        # Map uint8 [0, 255] to float32 [-1, 1].
        example["image"] = (image/127.5 - 1.0).astype(np.float32)
        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
        return example
class ImageNetSRTrain(ImageNetSR):
    """Super-resolution training split: curated HR subset of ImageNet train."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # Restrict the full training set to the pre-selected high-resolution indices.
        with open("data/imagenet_train_hr_indices.p", "rb") as handle:
            hr_indices = pickle.load(handle)
        full_dataset = ImageNetTrain(process_images=False,)
        return Subset(full_dataset, hr_indices)
class ImageNetSRValidation(ImageNetSR):
    """Super-resolution validation split: curated HR subset of ImageNet val."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # Restrict the full validation set to the pre-selected high-resolution indices.
        with open("data/imagenet_val_hr_indices.p", "rb") as handle:
            hr_indices = pickle.load(handle)
        full_dataset = ImageNetValidation(process_images=False,)
        return Subset(full_dataset, hr_indices)
| 39.232911 | 115 | 0.600245 | import os, yaml, pickle, shutil, tarfile, glob
import cv2
import albumentations
import PIL
import numpy as np
import torchvision.transforms.functional as TF
from omegaconf import OmegaConf
from functools import partial
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset, Subset
import taming.data.utils as tdu
from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
from taming.data.imagenet import ImagePaths
from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
def synset2idx(path_to_yaml="data/index_synset.yaml"):
    """Return the inverse of the index->synset mapping stored in *path_to_yaml*.

    The YAML file maps integer class indices to synset ids; the returned dict
    maps synset id -> class index.
    """
    with open(path_to_yaml) as f:
        # safe_load: yaml.load without an explicit Loader is a TypeError in
        # PyYAML >= 6 and allowed arbitrary object construction before that.
        di2s = yaml.safe_load(f)
    return dict((v, k) for k, v in di2s.items())
class ImageNetBase(Dataset):
def __init__(self, config=None):
self.config = config or OmegaConf.create()
if not type(self.config)==dict:
self.config = OmegaConf.to_container(self.config)
self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
self.process_images = True
self._prepare()
self._prepare_synset_to_human()
self._prepare_idx_to_synset()
self._prepare_human_to_integer_label()
self._load()
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def _prepare(self):
raise NotImplementedError()
def _filter_relpaths(self, relpaths):
ignore = set([
"n06596364_9591.JPEG",
])
relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
if "sub_indices" in self.config:
indices = str_to_indices(self.config["sub_indices"])
synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)
self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
files = []
for rpath in relpaths:
syn = rpath.split("/")[0]
if syn in synsets:
files.append(rpath)
return files
else:
return relpaths
def _prepare_synset_to_human(self):
SIZE = 2655750
URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
self.human_dict = os.path.join(self.root, "synset_human.txt")
if (not os.path.exists(self.human_dict) or
not os.path.getsize(self.human_dict)==SIZE):
download(URL, self.human_dict)
def _prepare_idx_to_synset(self):
URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
self.idx2syn = os.path.join(self.root, "index_synset.yaml")
if (not os.path.exists(self.idx2syn)):
download(URL, self.idx2syn)
def _prepare_human_to_integer_label(self):
URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
if (not os.path.exists(self.human2integer)):
download(URL, self.human2integer)
with open(self.human2integer, "r") as f:
lines = f.read().splitlines()
assert len(lines) == 1000
self.human2integer_dict = dict()
for line in lines:
value, key = line.split(":")
self.human2integer_dict[key] = int(value)
def _load(self):
with open(self.txt_filelist, "r") as f:
self.relpaths = f.read().splitlines()
l1 = len(self.relpaths)
self.relpaths = self._filter_relpaths(self.relpaths)
print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
self.synsets = [p.split("/")[0] for p in self.relpaths]
self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
unique_synsets = np.unique(self.synsets)
class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
if not self.keep_orig_class_label:
self.class_labels = [class_dict[s] for s in self.synsets]
else:
self.class_labels = [self.synset2idx[s] for s in self.synsets]
with open(self.human_dict, "r") as f:
human_dict = f.read().splitlines()
human_dict = dict(line.split(maxsplit=1) for line in human_dict)
self.human_labels = [human_dict[s] for s in self.synsets]
labels = {
"relpath": np.array(self.relpaths),
"synsets": np.array(self.synsets),
"class_label": np.array(self.class_labels),
"human_label": np.array(self.human_labels),
}
if self.process_images:
self.size = retrieve(self.config, "size", default=256)
self.data = ImagePaths(self.abspaths,
labels=labels,
size=self.size,
random_crop=self.random_crop,
)
else:
self.data = self.abspaths
class ImageNetTrain(ImageNetBase):
NAME = "ILSVRC2012_train"
URL = "http://www.image-net.org/challenges/LSVRC/2012/"
AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
FILES = [
"ILSVRC2012_img_train.tar",
]
SIZES = [
147897477120,
]
def __init__(self, process_images=True, data_root=None, **kwargs):
self.process_images = process_images
self.data_root = data_root
super().__init__(**kwargs)
def _prepare(self):
if self.data_root:
self.root = os.path.join(self.data_root, self.NAME)
else:
cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
self.datadir = os.path.join(self.root, "data")
self.txt_filelist = os.path.join(self.root, "filelist.txt")
self.expected_length = 1281167
self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
default=True)
if not tdu.is_prepared(self.root):
print("Preparing dataset {} in {}".format(self.NAME, self.root))
datadir = self.datadir
if not os.path.exists(datadir):
path = os.path.join(self.root, self.FILES[0])
if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
import academictorrents as at
atpath = at.get(self.AT_HASH, datastore=self.root)
assert atpath == path
print("Extracting {} to {}".format(path, datadir))
os.makedirs(datadir, exist_ok=True)
with tarfile.open(path, "r:") as tar:
tar.extractall(path=datadir)
print("Extracting sub-tars.")
subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
for subpath in tqdm(subpaths):
subdir = subpath[:-len(".tar")]
os.makedirs(subdir, exist_ok=True)
with tarfile.open(subpath, "r:") as tar:
tar.extractall(path=subdir)
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
filelist = [os.path.relpath(p, start=datadir) for p in filelist]
filelist = sorted(filelist)
filelist = "\n".join(filelist)+"\n"
with open(self.txt_filelist, "w") as f:
f.write(filelist)
tdu.mark_prepared(self.root)
class ImageNetValidation(ImageNetBase):
    # Split constants. AT_HASH identifies the tarball on Academic Torrents;
    # SIZES hold the expected byte sizes used to decide whether a file must
    # be (re)downloaded.
    NAME = "ILSVRC2012_validation"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
    FILES = [
        "ILSVRC2012_img_val.tar",
        "validation_synset.txt",
    ]
    SIZES = [
        6744924160,
        1950000,
    ]
    def __init__(self, process_images=True, data_root=None, **kwargs):
        """ImageNet validation split.

        process_images: stored as an attribute; presumably consumed by
            ImageNetBase -- confirm in the base class.
        data_root: optional dataset root; defaults to the XDG cache dir.
        """
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)
    def _prepare(self):
        # Download, extract and index the split. Idempotent: guarded by
        # tdu.is_prepared / tdu.mark_prepared marker files under self.root.
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # Default cache location, e.g. ~/.cache/autoencoders/data/<NAME>.
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 50000  # official ILSVRC2012 validation size
        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
                                    default=False)
        if not tdu.is_prepared(self.root):
            print("Preparing dataset {} in {}".format(self.NAME, self.root))
            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                # (Re)download via Academic Torrents if the tarball is
                # missing or has an unexpected size.
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path
                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall on a downloaded archive is exposed
                # to tar path traversal if the archive is untrusted.
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)
                # The validation tar is flat; fetch the image->synset mapping
                # and move each image into a per-synset subfolder.
                vspath = os.path.join(self.root, self.FILES[1])
                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
                    download(self.VS_URL, vspath)
                with open(vspath, "r") as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict(line.split() for line in synset_dict)
                print("Reorganizing into synset folders")
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for k, v in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)
            # Index every JPEG (paths relative to datadir) into filelist.txt.
            # Note: glob is called without recursive=True, so "**" matches
            # exactly one directory level (the synset folders) -- sufficient
            # for the layout created above.
            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
class ImageNetSR(Dataset):
    """Super-resolution dataset: yields an HR crop plus a degraded LR copy.

    Per item: random/center square crop (side sampled from
    [min_crop_f, max_crop_f] * shorter image side) -> rescale so the smaller
    side equals `size` -> degrade down to `size / downscale_f`. Both images
    are returned as float32 scaled to [-1, 1].
    """
    def __init__(self, size=None,
                 degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
                 random_crop=True):
        """
        :param size: HR side length after cropping and rescaling (required).
        :param degradation: "bsrgan", "bsrgan_light", or one of the
            "cv_*" / "pil_*" interpolation names listed below.
        :param downscale_f: LR downscale factor; size/downscale_f must be
            an integer.
        :param min_crop_f: lower bound of the crop-side fraction.
        :param max_crop_f: upper bound of the crop-side fraction (<= 1).
        :param random_crop: random crop if True, otherwise center crop.
        """
        self.base = self.get_base()
        assert size
        assert (size / downscale_f).is_integer()
        self.size = size
        self.LR_size = int(size / downscale_f)
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert(max_crop_f <= 1.)
        self.center_crop = not random_crop
        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
        self.pil_interpolation = False  # overwritten below for "pil_*" modes
        if degradation == "bsrgan":
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
        elif degradation == "bsrgan_light":
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
        else:
            # Plain interpolation-based degradation (OpenCV or Pillow).
            interpolation_fn = {
                "cv_nearest": cv2.INTER_NEAREST,
                "cv_bilinear": cv2.INTER_LINEAR,
                "cv_bicubic": cv2.INTER_CUBIC,
                "cv_area": cv2.INTER_AREA,
                "cv_lanczos": cv2.INTER_LANCZOS4,
                "pil_nearest": PIL.Image.NEAREST,
                "pil_bilinear": PIL.Image.BILINEAR,
                "pil_bicubic": PIL.Image.BICUBIC,
                "pil_box": PIL.Image.BOX,
                "pil_hamming": PIL.Image.HAMMING,
                "pil_lanczos": PIL.Image.LANCZOS,
            }[degradation]
            self.pil_interpolation = degradation.startswith("pil_")
            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
                                                                          interpolation=interpolation_fn)
    def __len__(self):
        return len(self.base)
    def __getitem__(self, i):
        example = self.base[i]
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        # Sample a square crop whose side is a random fraction of the
        # shorter image side.
        min_side_len = min(image.shape[:2])
        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
        crop_side_len = int(crop_side_len)
        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
        image = self.cropper(image=image)["image"]
        image = self.image_rescaler(image=image)["image"]
        if self.pil_interpolation:
            # Pillow-based resize expects a PIL image, not an ndarray.
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)
        else:
            LR_image = self.degradation_process(image=image)["image"]
        # Scale uint8 [0, 255] to float32 [-1, 1].
        example["image"] = (image/127.5 - 1.0).astype(np.float32)
        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
        return example
class ImageNetSRTrain(ImageNetSR):
    """SR training split: ImageNetTrain restricted to precomputed HR indices."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def get_base(self):
        # Indices of sufficiently high-resolution training images,
        # precomputed and pickled alongside the repository data.
        with open("data/imagenet_train_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        dset = ImageNetTrain(process_images=False,)
        return Subset(dset, indices)
class ImageNetSRValidation(ImageNetSR):
    """SR validation split: ImageNetValidation restricted to HR indices."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def get_base(self):
        # Precomputed indices of high-resolution validation images.
        with open("data/imagenet_val_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        dset = ImageNetValidation(process_images=False,)
        return Subset(dset, indices)
| true | true |
1c47411f4cf5b0336d56840ed649c1d4c06df542 | 3,892 | py | Python | Inference/src/exit_placement/modules/waspVideo.py | ZSL98/ETBA | 618317698adb9e372fb11dc0c3a01f856e0759b0 | [
"MIT"
] | 1 | 2021-12-01T15:22:44.000Z | 2021-12-01T15:22:44.000Z | Inference/src/run_engine/modules/waspVideo.py | ZSL98/ETBA | 618317698adb9e372fb11dc0c3a01f856e0759b0 | [
"MIT"
] | null | null | null | Inference/src/run_engine/modules/waspVideo.py | ZSL98/ETBA | 618317698adb9e372fb11dc0c3a01f856e0759b0 | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class _AtrousModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):
super(_AtrousModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class wasp(nn.Module):
    """Waterfall Atrous Spatial Pooling (WASP) head.

    Unlike ASPP, the atrous branches are cascaded ("waterfall"): each
    branch consumes the previous branch's output. The four branch outputs
    plus a global-average-pooling branch are concatenated and projected
    back to 256 channels.

    Args:
        backbone: backbone name; selects the input channel count
            ('drn' -> 512, 'mobilenet' -> 320, anything else -> 2048).
        output_stride: backbone output stride (16 or 8); selects dilations.
        BatchNorm: normalization layer constructor (e.g. nn.BatchNorm2d).
    """
    def __init__(self, backbone, output_stride, BatchNorm):
        super(wasp, self).__init__()
        if backbone == 'drn':
            inplanes = 512
        elif backbone == 'mobilenet':
            inplanes = 320
        else:
            inplanes = 2048
        # Dilation rates double when the output stride halves so the
        # effective receptive field stays comparable.
        if output_stride == 16:
            dilations = [24, 18, 12, 6]
        elif output_stride == 8:
            dilations = [48, 36, 24, 12]
        else:
            raise NotImplementedError
        self.aspp1 = _AtrousModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)
        self.aspp2 = _AtrousModule(256, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)
        self.aspp3 = _AtrousModule(256, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)
        self.aspp4 = _AtrousModule(256, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)
        # Image-level context branch: pool -> 1x1 conv -> ReLU.
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
                                             nn.ReLU())
        # 1280 = 5 branches x 256 channels after concatenation.
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.conv2 = nn.Conv2d(256, 256, 1, bias=False)
        self.bn1 = BatchNorm(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()

    def forward(self, x):
        # Waterfall cascade: each atrous block feeds the next one.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x1)
        x3 = self.aspp3(x2)
        x4 = self.aspp4(x3)
        # Per-branch 1x1 projection. NOTE(review): conv2 is applied to every
        # branch *twice*; this looks like a copy-paste duplication but is
        # preserved verbatim to keep behavior and trained-weight
        # compatibility -- confirm against the reference WASP implementation
        # before changing.
        x1 = self.conv2(x1)
        x2 = self.conv2(x2)
        x3 = self.conv2(x3)
        x4 = self.conv2(x4)
        x1 = self.conv2(x1)
        x2 = self.conv2(x2)
        x3 = self.conv2(x3)
        x4 = self.conv2(x4)
        # Broadcast the image-level features back to the spatial size.
        x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        return self.dropout(x)

    def _init_weight(self):
        # Kaiming init for convolutions, unit-gain init for batch norms.
        # (The original contained a duplicated, unreachable second
        # `elif isinstance(m, nn.BatchNorm2d)` branch; removed as dead code.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def build_wasp(backbone, output_stride, BatchNorm):
    """Factory helper: construct a WASP head for the given configuration."""
    module = wasp(backbone, output_stride, BatchNorm)
    return module
| 35.706422 | 113 | 0.548304 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class _AtrousModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):
super(_AtrousModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class wasp(nn.Module):
def __init__(self, backbone, output_stride, BatchNorm):
super(wasp, self).__init__()
if backbone == 'drn':
inplanes = 512
elif backbone == 'mobilenet':
inplanes = 320
else:
inplanes = 2048
if output_stride == 16:
dilations = [24, 18, 12, 6]
elif output_stride == 8:
dilations = [48, 36, 24, 12]
else:
raise NotImplementedError
self.aspp1 = _AtrousModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)
self.aspp2 = _AtrousModule(256, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)
self.aspp3 = _AtrousModule(256, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)
self.aspp4 = _AtrousModule(256, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.conv2 = nn.Conv2d(256,256,1,bias=False)
self.bn1 = BatchNorm(256)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.5)
self._init_weight()
def forward(self, x):
x1 = self.aspp1(x)
x2 = self.aspp2(x1)
x3 = self.aspp3(x2)
x4 = self.aspp4(x3)
x1 = self.conv2(x1)
x2 = self.conv2(x2)
x3 = self.conv2(x3)
x4 = self.conv2(x4)
x1 = self.conv2(x1)
x2 = self.conv2(x2)
x3 = self.conv2(x3)
x4 = self.conv2(x4)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
return self.dropout(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_wasp(backbone, output_stride, BatchNorm):
return wasp(backbone, output_stride, BatchNorm)
| true | true |
1c4741d36aea8c54d0b8b5af19f5e4e2cb552b0c | 2,840 | py | Python | perfkitbenchmarker/linux_packages/mysql80.py | dongbinghua/PerfKitBenchmarker | d3424af4b4d60b4a5c19009b8aee29ceab7132d4 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/mysql80.py | dongbinghua/PerfKitBenchmarker | d3424af4b4d60b4a5c19009b8aee29ceab7132d4 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/mysql80.py | dongbinghua/PerfKitBenchmarker | d3424af4b4d60b4a5c19009b8aee29ceab7132d4 | [
"Apache-2.0"
] | 1 | 2022-02-20T14:46:56.000Z | 2022-02-20T14:46:56.000Z | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing mysql installation and cleanup functions."""
import re
MYSQL_PSWD = 'perfkitbenchmarker'
PACKAGE_NAME = 'mysql'
def YumInstall(vm):
  """Installs the mysql package on the VM.

  Not implemented: only apt-based (Debian/Ubuntu) images are supported.

  Args:
    vm: The VM on which to install MySQL.

  Raises:
    NotImplementedError: Always.
  """
  raise NotImplementedError
def AptInstall(vm):
  """Installs the mysql 8.0 package on the VM.

  Configures the MySQL apt repository, imports the repository signing key
  if apt reports it missing, preseeds the root password via debconf, and
  installs mysql-server non-interactively.

  Args:
    vm: The VM on which to install MySQL.

  Raises:
    RuntimeError: If 'apt-get update' fails for a reason other than a
      missing repository public key, or if the key id cannot be parsed
      from apt's error output.
  """
  vm.RemoteCommand('wget -c '
                   'https://repo.mysql.com//mysql-apt-config_0.8.17-1_all.deb')
  vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-server'
                   ' select mysql-8.0 | sudo debconf-set-selections')
  vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-product'
                   ' select Ok | sudo debconf-set-selections')
  vm.RemoteCommand('sudo -E DEBIAN_FRONTEND=noninteractive dpkg -i'
                   ' mysql-apt-config_0.8.17-1_all.deb')
  _, stderr = vm.RemoteCommand('sudo apt-get update', ignore_failure=True)
  if stderr:
    if 'public key is not available:' in stderr:
      # MySQL rotates the repository signing key; when apt complains, pull
      # the advertised key id and import it.
      # re.search is required here: apt's stderr is multi-line and the
      # NO_PUBKEY token is usually not on the first line, so the previous
      # anchored re.match('.*NO_PUBKEY ...') could never find it ('.' does
      # not match newlines).
      match = re.search(r'NO_PUBKEY ([A-Z0-9]+)', stderr)
      if match:
        key = match.group(1)
        vm.RemoteCommand('sudo apt-key adv '
                         f'--keyserver keyserver.ubuntu.com --recv-keys {key}')
      else:
        raise RuntimeError('No public key found by regex.')
    else:
      raise RuntimeError(stderr)
  vm.RemoteCommand('echo "mysql-server-8.0 mysql-server/root_password password '
                   f'{MYSQL_PSWD}" | sudo debconf-set-selections')
  vm.RemoteCommand('echo "mysql-server-8.0 mysql-server/root_password_again '
                   f'password {MYSQL_PSWD}" | sudo debconf-set-selections')
  vm.InstallPackages('mysql-server')
def YumGetPathToConfig(vm):
  """Returns the path to the mysql config file.

  Not implemented: only apt-based images are supported.
  """
  raise NotImplementedError
def AptGetPathToConfig(vm):
  """Returns the absolute path of mysqld's configuration file.

  Args:
    vm: Unused; accepted for interface symmetry with the Yum variant.
  """
  del vm
  return '/etc/mysql/mysql.conf.d/mysqld.cnf'
def YumGetServiceName(vm):
  """Returns the name of the mysql service.

  Not implemented: only apt-based images are supported.
  """
  raise NotImplementedError
def AptGetServiceName(vm):
  """Returns the systemd service name used to manage mysqld.

  Args:
    vm: Unused; accepted for interface symmetry with the Yum variant.
  """
  del vm
  return 'mysql'
| 33.809524 | 80 | 0.689085 |
import re
MYSQL_PSWD = 'perfkitbenchmarker'
PACKAGE_NAME = 'mysql'
def YumInstall(vm):
raise NotImplementedError
def AptInstall(vm):
vm.RemoteCommand('wget -c '
'https://repo.mysql.com//mysql-apt-config_0.8.17-1_all.deb')
vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-server'
' select mysql-8.0 | sudo debconf-set-selections')
vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-product'
' select Ok | sudo debconf-set-selections')
vm.RemoteCommand('sudo -E DEBIAN_FRONTEND=noninteractive dpkg -i'
' mysql-apt-config_0.8.17-1_all.deb')
_, stderr = vm.RemoteCommand('sudo apt-get update', ignore_failure=True)
if stderr:
if 'public key is not available:' in stderr:
match = re.match('.*NO_PUBKEY ([A-Z0-9]*)', stderr)
if match:
key = match.group(1)
vm.RemoteCommand('sudo apt-key adv '
f'--keyserver keyserver.ubuntu.com --recv-keys {key}')
else:
raise RuntimeError('No public key found by regex.')
else:
raise RuntimeError(stderr)
vm.RemoteCommand('echo "mysql-server-8.0 mysql-server/root_password password '
f'{MYSQL_PSWD}" | sudo debconf-set-selections')
vm.RemoteCommand('echo "mysql-server-8.0 mysql-server/root_password_again '
f'password {MYSQL_PSWD}" | sudo debconf-set-selections')
vm.InstallPackages('mysql-server')
def YumGetPathToConfig(vm):
raise NotImplementedError
def AptGetPathToConfig(vm):
del vm
return '/etc/mysql/mysql.conf.d/mysqld.cnf'
def YumGetServiceName(vm):
raise NotImplementedError
def AptGetServiceName(vm):
del vm
return 'mysql'
| true | true |
1c47428ab6ac6df0584b28628f8c4a0146a8c436 | 750 | py | Python | setup.py | lmkoch/logistic-normal | b270811b42adc7037e342c8b039a759460322de3 | [
"MIT"
] | 2 | 2017-10-13T01:03:51.000Z | 2019-05-24T09:46:55.000Z | setup.py | lmkoch/logistic-normal | b270811b42adc7037e342c8b039a759460322de3 | [
"MIT"
] | null | null | null | setup.py | lmkoch/logistic-normal | b270811b42adc7037e342c8b039a759460322de3 | [
"MIT"
] | null | null | null | __author__ = 'lkoch'
from setuptools import setup, find_packages
setup(
# Application name:
name='logisticnormal',
description='Logistic-normal distribution: provides probability density function and parameter estimation',
# Version number (initial):
version="0.1.0",
# Application author details:
author='Lisa Koch',
author_email='l.koch@imperial.ac.uk',
# Packages
packages=['logisticnormal'],
# Details
url='http://github.com/lmkoch/logistic-normal',
download_url='https://github.com/lmkoch/logistic-normal/zipball/master',
#
license='MIT',
install_requires = [
'scipy >= 0.10.1',
'numpy >= 1.6.2'
],
# long_description=open("README.txt").read(),
) | 21.428571 | 111 | 0.652 | __author__ = 'lkoch'
from setuptools import setup, find_packages
setup(
name='logisticnormal',
description='Logistic-normal distribution: provides probability density function and parameter estimation',
version="0.1.0",
author='Lisa Koch',
author_email='l.koch@imperial.ac.uk',
packages=['logisticnormal'],
url='http://github.com/lmkoch/logistic-normal',
download_url='https://github.com/lmkoch/logistic-normal/zipball/master',
license='MIT',
install_requires = [
'scipy >= 0.10.1',
'numpy >= 1.6.2'
],
) | true | true |
1c47429858f6c243073d4748b736054174321ec4 | 3,595 | py | Python | uhd_restpy/testplatform/sessions/ixnetwork/topology/dhcp6relaytlvprofile_26571057903c7fcd2a20eb10f55be22f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/dhcp6relaytlvprofile_26571057903c7fcd2a20eb10f55be22f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/dhcp6relaytlvprofile_26571057903c7fcd2a20eb10f55be22f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Dhcp6RelayTlvProfile(Base):
    """DHCPv6 Relay Agent TLV Profiles.

    Wraps the required dhcp6RelayTlvProfile resource; the resource is
    re-fetched from the server each time a property is accessed.
    """
    __slots__ = ()
    _SDM_NAME = 'dhcp6RelayTlvProfile'
    _SDM_ATT_MAP = {
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
    }

    def __init__(self, parent):
        super(Dhcp6RelayTlvProfile, self).__init__(parent)

    @property
    def TlvProfile(self):
        """Accessor for the child TlvProfile node.

        Returns:
            TlvProfile: helper instance for the tlvProfile child resource.

        Raises:
            ServerError: the server reported an uncategorized error.
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
        return TlvProfile(self)

    @property
    def Count(self):
        """number: element count inside the associated multiplier-scaled
        container object (e.g. devices inside a Device Group)."""
        return self._get_attribute(self._SDM_ATT_MAP['Count'])

    @property
    def DescriptiveName(self):
        """str: longer, context-rich element name; unlike Name it is not
        guaranteed to be unique."""
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])

    @property
    def Name(self):
        """str: name of the NGPF element, unique within the Scenario."""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    def update(self, Name=None):
        """Push attribute changes for this dhcp6RelayTlvProfile to the server.

        Args:
            Name (str): name of the NGPF element, unique within the Scenario.

        Raises:
            ServerError: the server reported an uncategorized error.
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| 36.683673 | 169 | 0.685953 |
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Dhcp6RelayTlvProfile(Base):
__slots__ = ()
_SDM_NAME = 'dhcp6RelayTlvProfile'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
}
def __init__(self, parent):
super(Dhcp6RelayTlvProfile, self).__init__(parent)
@property
def TlvProfile(self):
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
return TlvProfile(self)
@property
def Count(self):
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, Name=None):
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| true | true |
1c4742f3efbcd1dd18c5b65e1aabdd9036acc3cf | 8,057 | py | Python | ni_usb_6501.py | hunteddeerweb/NI_USB-6501 | c828ea2e0b2016be909f5083808651ea85c9abe1 | [
"WTFPL"
] | 7 | 2015-02-13T04:52:33.000Z | 2020-01-08T14:42:40.000Z | ni_usb_6501.py | hunteddeerweb/NI_USB-6501 | c828ea2e0b2016be909f5083808651ea85c9abe1 | [
"WTFPL"
] | null | null | null | ni_usb_6501.py | hunteddeerweb/NI_USB-6501 | c828ea2e0b2016be909f5083808651ea85c9abe1 | [
"WTFPL"
] | 4 | 2015-02-13T04:52:37.000Z | 2019-12-18T15:50:11.000Z | #!/usr/bin/python
## coding=utf-8
"""
The ni_usb_6501 is a digital IO module for USB from National Instruments.
Unfortunately their Linux driver is excessively large (>60MB), difficult to install
and doesn't offer off-the-shelf support for python.
This python driver is based on Marc Schutz's pioneer work on c driver
(https://github.com/schuetzm/ni-usb-6501)
INSTALLATION
1. Install the latest PyUSB (at least version 1.0.a3) from http://sourceforge.net/projects/pyusb/
2. Change the permissions of the USB device node by creating a udev rule.
e.g. add the following line (and file) to a file in /etc/udev/rules.d/usb.rules
SUBSYSTEM=="usb", ENV{DEVTYPE}=="usb_device", MODE="0664", GROUP="usbusers"
This will set the owner of the device node to root:usbusers rather than root:root
After that add user to the usbusers group for enabling access to the device.
adduser _<user>_ usbusers
(Make sure you have group usbusers)
...and you are good to go.
TODO
- Counter operations
"""
import usb.core
import usb.util
ID_VENDOR = 0x3923
ID_PRODUCT = 0x718a
def get_adapter(**kwargs):
    """Return a NiUsb6501 handle for the single adapter attached to the PC.

    All keyword arguments are forwarded to pyusb's ``usb.core.find``
    (http://pyusb.sourceforge.net/docs/1.0/tutorial.html).
    """
    dev = usb.core.find(idVendor=ID_VENDOR, idProduct=ID_PRODUCT, **kwargs)
    if not dev:
        raise ValueError('Device not found')
    return NiUsb6501(dev)
"""
Returns NiUsb6501 handle for every adapter that is connected to PC.
Forwards all parameters to pyusb (http://pyusb.sourceforge.net/docs/1.0/tutorial.html)
"""
def find_adapters(**kwargs):
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT, **kwargs)
if not devices:
raise ValueError('Device not found')
return [NiUsb6501(dev) for dev in devices]
class NiUsb6501:
    """Driver for the NI USB-6501 digital IO adapter.

    Typical usage:

        adapter = get_adapter()
        # one bit per pin: 1 = write mode, 0 = read mode
        adapter.set_io_mode(0b00000000, 0b11111111, 0b01010101)
        # then call adapter.read_port(port) and adapter.write_port(port, value)
    """
    def __init__(self, device):
        """ used only internally via get_adapter() and find_adapters() """
        self.device = device
        cfg = self.device.get_active_configuration()
        # Remember the interface number so release_interface() can detach
        # the kernel driver again later. (Fix: the original stored this only
        # in a local variable, so release_interface() raised AttributeError
        # on self.interface_number.)
        self.interface_number = cfg[(0, 0)].bInterfaceNumber
        if self.device.is_kernel_driver_active(self.interface_number):
            self.device.detach_kernel_driver(self.interface_number)
        # set the active configuration. With no arguments, the first
        # configuration will be the active one
        self.device.set_configuration()
        # This is needed to release interface, otherwise attach_kernel_driver
        # fails due to "Resource busy"
        usb.util.dispose_resources(self.device)

    def set_io_mode(self, port0, port1, port2):
        """
        Set mode for every IO pin. PIN modes are given in three groups (bitmasks represented by integers)
        bit = 0: read
        bit = 1: write
        """
        buf = list("\x02\x10\x00\x00\x00\x05\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00")
        buf[6] = chr(port0)
        buf[7] = chr(port1)
        buf[8] = chr(port2)
        buf = ''.join(buf)
        return self.send_request(0x12, buf)

    def read_port(self, port):
        """
        Read the value from all read-mode pins from one of the 8 PIN ports
        port is 0, 1 or 2
        """
        buf = list("\x02\x10\x00\x00\x00\x03\x00\x00")
        buf[6] = chr(port)
        buf = ''.join(buf)
        response = self.send_request(0x0e, buf)
        # Byte 10 carries the port value; it is masked out of the
        # comparison below.
        self.packet_matches(response,
                            "\x00\x0c\x01\x00\x00\x00\x00\x02\x00\x03\x00\x00",
                            "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\xff")
        return ord(response[10])

    def write_port(self, port, value):
        """
        Write value to all write-mode pins in one of the 8 PIN ports
        port is 0, 1 or 2
        value is 8 bits represented by integer
        """
        buf = list("\x02\x10\x00\x00\x00\x03\x00\x00\x03\x00\x00\x00")
        buf[6] = chr(port)
        buf[9] = chr(value)
        buf = ''.join(buf)
        response = self.send_request(0x0f, buf)
        self.packet_matches(response,
                            "\x00\x08\x01\x00\x00\x00\x00\x02",
                            "\xff\xff\xff\xff\xff\xff\xff\xff")
        return response

    ##########################################################
    # TODO: COUNTERS ARE NOT YET IMPLEMENTED
    ##########################################################

    def read_counter(self):
        pass

    def write_counter(self):
        pass

    def start_counter(self):
        pass

    def stop_counter(self):
        pass

    ##########################################################
    # INTERNAL UTILITY FUNCTIONS
    ##########################################################

    EP_IN, EP_OUT = 0x81, 0x01          # bulk endpoints (in / out)
    HEADER_PACKET, HEADER_DATA = 4, 4   # protocol header sizes in bytes
    INTERFACE = 0

    def send_request(self, cmd, request):
        """Frame `request` with packet/data headers, write it to the device
        and return the response payload (packet header stripped)."""
        if len(request) + self.HEADER_PACKET + self.HEADER_DATA > 255:
            raise ValueError('Request too long (%d bytes)' % (len(request) + self.HEADER_PACKET + self.HEADER_DATA))
        buf = list("\x00\x01\x00\x00\x00\x00\x01\x00")
        buf[3] = chr(self.HEADER_PACKET + self.HEADER_DATA + len(request))
        buf[5] = chr(self.HEADER_DATA + len(request))
        buf[7] = chr(cmd)
        buf = ''.join(buf) + request
        assert self.device.write(self.EP_OUT, buf, self.INTERFACE) == len(buf)
        ret = self.device.read(self.EP_IN, len(buf), self.INTERFACE)
        return ''.join([chr(x) for x in ret])[self.HEADER_PACKET:]

    def packet_matches(self, actual, expected, mask):
        """Raise ValueError unless `actual` equals `expected` on every bit
        selected by `mask` (all three are equal-length byte strings)."""
        if len(actual) != len(expected):
            # print() form works under both Python 2 and 3 for one argument.
            print(repr(actual))
            print(repr(expected))
            print(repr(mask))
            raise ValueError('Protocol error - invalid response length %d' % len(actual))
        for b, e, m in zip(actual, expected, mask):
            if (ord(b) & ord(m)) != (ord(e) & ord(m)):
                raise ValueError("""Protocol error - invalid response
actual:   %s
expected: %s
mask:     %s
""" % (repr(actual), repr(expected), repr(mask)))

    def release_interface(self):
        """
        Free all resources, then the device can be used once again
        """
        if self.device.is_kernel_driver_active(self.interface_number):
            self.device.detach_kernel_driver(self.interface_number)
        usb.util.release_interface(self.device, self.INTERFACE)
        usb.util.dispose_resources(self.device)
        self.device.reset()
        self.device = None
# USAGE EXAMPLE (requires attached NI USB-6501 hardware).
# NOTE: the `print bin(...)` statement below is Python 2 syntax; this demo
# script as written runs only under Python 2.
if __name__ == "__main__":
    dev = get_adapter()
    if not dev:
        raise Exception("No device found")
    dev.set_io_mode(0b11111111, 0b11111111, 0b00000000)
    dev.write_port(0, 0b11001100)
    dev.write_port(1, 0b10101010)
    print bin(dev.read_port(2))
    ret = dev.set_io_mode(0, 255, 0) # set all pins between 3-6 & 27-30 as output pins
    # example has special focus on pins 3 & 30; the values of the others are all set to high
    # bitmask: 247: 1111 0111
    #           27: 1 low byte
    #           28: 1
    #           29: 1
    #           30: 0
    #            6: 1
    #            5: 1
    #            4: 1
    #            3: 1 high byte
    ret = dev.write_port(1, 0) # both zero
    print(dev.read_port(1))
    ret = dev.write_port(1, 247) # 30 low
    print(dev.read_port(1))
    ret = dev.write_port(1, 127) # 3 low
    print(dev.read_port(1))
    ret = dev.write_port(1, 247) # 30 low
    print(dev.read_port(1))
    ret = dev.write_port(1, 127) # 3 low
    print(dev.read_port(1))
    ret = dev.write_port(1, 0) # both zero
    print(dev.read_port(1))
    ret = dev.write_port(1, 255) # both high
    print(dev.read_port(1))
    dev.release_interface() # clean exit, allows direct reuse without replugging the ni6501
    del dev
| 32.619433 | 116 | 0.601961 |
b_6501 is a digital IO module for USB from National Instruments.
Unfortunately their Linux driver is excessively large (>60MB), difficult to install
and doesn't offer off-the-shelf support for python.
This python driver is based on Marc Schutz's pioneer work on c driver
(https://github.com/schuetzm/ni-usb-6501)
INSTALLATION
1. Install the latest PyUSB (at least version 1.0.a3) from http://sourcceforge.net/projects/pyusb/
2. Change the permissions of the USB device node by creating a udev rule.
e.g. add the following line (and file) to a file in /etc/udev/rules.d/usb.rules
SUBSYSTEM=="usb", ENV{DEVTYPE}=="usb_device", MODE="0664", GROUP="usbusers"
This will set the owner of the device node to root:usbusers rather than root:root
After that add user to the usbusers group for enabling access to the device.
adduser _<user>_ usbusers
(Make sure you have group usbusers)
...and you are good to go.
TODO
- Counter operations
"""
import usb.core
import usb.util
ID_VENDOR = 0x3923
ID_PRODUCT = 0x718a
def get_adapter(**kwargs):
"""
Returns NiUsb6501 handler if only single adapter is connected to PC.
Forwards all parameters to pyusb (http://pyusb.sourceforge.net/docs/1.0/tutorial.html)
"""
device = usb.core.find(idVendor=ID_VENDOR, idProduct=ID_PRODUCT, **kwargs)
if not device:
raise ValueError('Device not found')
return NiUsb6501(device)
"""
Returns NiUsb6501 handle for every adapter that is connected to PC.
Forwards all parameters to pyusb (http://pyusb.sourceforge.net/docs/1.0/tutorial.html)
"""
def find_adapters(**kwargs):
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT, **kwargs)
if not devices:
raise ValueError('Device not found')
return [NiUsb6501(dev) for dev in devices]
class NiUsb6501:
    """
    Handler for one National Instruments USB-6501 digital I/O adapter.

    Typical usage:
    adapter = get_adapter()
    adapter.set_io_mode(0b00000000, 0b11111111, 0b01010101) # one bit per port 1=write, 0=read
    # start calling adapter.read_port(port) and adapter.write_port(port, values)
    """
    def __init__(self, device):
        """ used only internally via get_adapter() and find_adapters() """
        self.device = device
        # Claim the interface: if a kernel driver is already bound to it,
        # detach it before activating the configuration.
        cfg = self.device.get_active_configuration()
        interface_number = cfg[(0,0)].bInterfaceNumber
        if self.device.is_kernel_driver_active(interface_number):
            self.device.detach_kernel_driver(interface_number)
        self.device.set_configuration()
        usb.util.dispose_resources(self.device)
    def set_io_mode(self, port0, port1, port2):
        """
        Set mode for every IO pin. PIN modes are given in three groups (bitmasks represented by integers)
        bit = 0: read
        bit = 1: write
        """
        # Fixed request frame for "set I/O direction"; bytes 6-8 carry the
        # three per-port direction masks.
        # NOTE(review): the payload is built as a str via chr() -- Python 2
        # era code; presumably send_request (not visible here) handles the
        # byte encoding. Confirm before running on Python 3.
        buf = list("\x02\x10\x00\x00\x00\x05\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00")
        buf[6] = chr(port0)
        buf[7] = chr(port1)
        buf[8] = chr(port2)
        buf = ''.join(buf)
        return self.send_request(0x12, buf)
    def read_port(self, port):
        """
        Read the value from all read-mode pins from one of the 8 PIN ports
        port is 0, 1 or 2
        """
        # Request frame for a port read; byte 6 selects the port.
        buf = list("\x02\x10\x00\x00\x00\x03\x00\x00")
        buf[6] = chr(port)
        buf = ''.join(buf)
        response = self.send_request(0x0e, buf)
        # Validate the reply header; byte 10 is masked out of the comparison
        # (mask byte \x00) because it holds the actual pin states.
        self.packet_matches(response,
                            "\x00\x0c\x01\x00\x00\x00\x00\x02\x00\x03\x00\x00",
                            "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\xff")
        return ord(response[10])
    def write_port(self, port, value):
        """
        Write value to all write-mode pins in one of the 8 PIN ports
        port is 0, 1 or 2
        value is 8 bits represented by integer
        """
        # Request frame for a port write; byte 6 selects the port and
        # byte 9 carries the 8-bit output value.
        buf = list("\x02\x10\x00\x00\x00\x03\x00\x00\x03\x00\x00\x00")
        buf[6] = chr(port)
        buf[9] = chr(value)
        buf = ''.join(buf)
        response = self.send_request(0x0f, buf)
        # The reply is a fixed acknowledgement header; every byte is checked.
        self.packet_matches(response,
                            "\x00\x08\x01\x00\x00\x00\x00\x02",
                            "\xff\xff\xff\xff\xff\xff\xff\xff")
        return response
| false | true |
1c4743783996b7abc30c57bd8fb42268e001125c | 437 | py | Python | commerce/auctions/migrations/0007_comment_commenttitle.py | degerahmet/Auctions-Django-Project | d87ac8b730b9d7ab3d4892494be6ca5fd4fe11cb | [
"Apache-2.0"
] | null | null | null | commerce/auctions/migrations/0007_comment_commenttitle.py | degerahmet/Auctions-Django-Project | d87ac8b730b9d7ab3d4892494be6ca5fd4fe11cb | [
"Apache-2.0"
] | null | null | null | commerce/auctions/migrations/0007_comment_commenttitle.py | degerahmet/Auctions-Django-Project | d87ac8b730b9d7ab3d4892494be6ca5fd4fe11cb | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1 on 2020-08-21 15:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0006_auto_20200821_1746'),
]
operations = [
migrations.AddField(
model_name='comment',
name='commentTitle',
field=models.CharField(default=1, max_length=64),
preserve_default=False,
),
]
| 21.85 | 61 | 0.606407 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0006_auto_20200821_1746'),
]
operations = [
migrations.AddField(
model_name='comment',
name='commentTitle',
field=models.CharField(default=1, max_length=64),
preserve_default=False,
),
]
| true | true |
1c47458e561dbae5189c7cf50343fbf91ad93c07 | 132 | py | Python | web/views/auth/__init__.py | arrow2625/atxserver2 | 39280d0a2ec7d84c32005da919941935fd0632db | [
"MIT"
] | null | null | null | web/views/auth/__init__.py | arrow2625/atxserver2 | 39280d0a2ec7d84c32005da919941935fd0632db | [
"MIT"
] | null | null | null | web/views/auth/__init__.py | arrow2625/atxserver2 | 39280d0a2ec7d84c32005da919941935fd0632db | [
"MIT"
] | null | null | null | # coding: utf-8
#
from .openid import OpenIdMixin, AuthError
from .github import GithubOAuth2Mixin
from .google import GoogleMixin
| 18.857143 | 42 | 0.80303 |
from .openid import OpenIdMixin, AuthError
from .github import GithubOAuth2Mixin
from .google import GoogleMixin
| true | true |
1c4747390ffa6d1824d43557a023d590eb857e75 | 22,080 | py | Python | qiskit/ignis/verification/randomized_benchmarking/circuits.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | null | null | null | qiskit/ignis/verification/randomized_benchmarking/circuits.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | null | null | null | qiskit/ignis/verification/randomized_benchmarking/circuits.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | 1 | 2021-04-01T17:28:33.000Z | 2021-04-01T17:28:33.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# TODO(mtreinish): Remove these disables when implementation is finished
# pylint: disable=unused-argument,unnecessary-pass
"""
Generates randomized benchmarking sequences
"""
import copy
import numpy as np
import qiskit
from .Clifford import Clifford
from .clifford_utils import CliffordUtils as clutils
from .dihedral import CNOTDihedral
from .dihedral_utils import DihedralUtils as dutils
def handle_length_multiplier(length_multiplier, len_pattern,
                             is_purity=False):
    """
    Check validity of length_multiplier and normalize it to a vector.

    Args:
        length_multiplier: a scalar multiplier, or a sequence with one
            positive-integer entry per RB pattern
        len_pattern: number of entries in the RB pattern
        is_purity: True only for purity rb (default is False); a vector
            multiplier is not allowed in that case

    Returns:
        a numpy integer array of length len_pattern

    Raises:
        ValueError: on a non-integer, non-positive or wrongly sized
            multiplier, or a vector multiplier for purity RB
    """
    if hasattr(length_multiplier, "__len__"):
        if is_purity:
            raise ValueError(
                "In case of Purity RB the length multiplier should be None")
        if len(length_multiplier) != len_pattern:
            raise ValueError(
                "Length mulitiplier must be the same length as the pattern")
        length_multiplier = np.array(length_multiplier)
        # Accept any integer dtype (int16/int32/int64/...). Comparing the
        # dtype against the platform-default 'int' wrongly rejected valid
        # integer arrays of a non-default width.
        if (not np.issubdtype(length_multiplier.dtype, np.integer)
                or (length_multiplier < 1).any()):
            raise ValueError("Invalid length multiplier")
    else:
        length_multiplier = np.ones(len_pattern, dtype='int')*length_multiplier
    return length_multiplier
def check_pattern(pattern, is_purity=False):
    """
    Verify that the input RB pattern is valid, i.e. that each qubit
    appears at most once. For purity RB, additionally verify that all
    simultaneous sequences have the same dimension (e.g. only 1-qubit
    sequences, or only 2-qubit sequences, etc.).

    Args:
        pattern: RB pattern, a list of lists of qubit indices
        is_purity: True only for purity rb (default is False)

    Raises:
        ValueError: if a qubit index repeats, or (purity RB only) if the
            simultaneous sequences have different dimensions

    Return:
        flat list of all the qubits in the pattern,
        the maximum qubit number,
        and the maximal dimension (maximal number of qubits
        over all sequences)
    """
    flat_qubits = []
    seq_sizes = []
    for seq in pattern:
        flat_qubits.extend(seq)
        seq_sizes.append(len(seq))
    _, counts = np.unique(np.array(flat_qubits), return_counts=True)
    if (counts > 1).any():
        raise ValueError("Invalid pattern. Duplicate qubit index.")
    if is_purity:
        dim_distinct = np.unique(seq_sizes)
        if len(dim_distinct) > 1:
            raise ValueError("Invalid pattern for purity RB. \
                             All simultaneous sequences should have the \
                             same dimension.")
    return flat_qubits, np.max(flat_qubits).item(), np.max(seq_sizes)
def calc_xdata(length_vector, length_multiplier):
    """
    Calculate the set of sequence lengths.

    Args:
        length_vector: vector of sequence lengths
        length_multiplier: per-pattern multipliers of the lengths

    Returns:
        A 2D numpy array of sequence lengths, one row per multiplier
    """
    return np.array([np.array(length_vector) * mult
                     for mult in length_multiplier])
def randomized_benchmarking_seq(nseeds=1, length_vector=None,
                                rb_pattern=None,
                                length_multiplier=1, seed_offset=0,
                                align_cliffs=False,
                                interleaved_gates=None,
                                is_purity=False,
                                group_gates=None):
    """Get a generic randomized benchmarking sequence

    Args:
        nseeds: number of seeds
        length_vector: 'm' length vector of sequence lengths. Must be in
            ascending order. RB sequences of increasing length grow on top
            of the previous sequences.
        rb_pattern: A list of the form [[i,j],[k],...] which will make
            simultaneous RB sequences where Qi,Qj are a 2Q RB sequence and
            Qk is a 1Q sequence, etc. E.g. [[0,3],[2],[1]] would create RB
            sequences that are 2Q for Q0/Q3, 1Q for Q1+Q2. The number of
            qubits is the sum of the entries. For 'regular' RB the
            qubit_pattern is just [[0]], [[0,1]].
        length_multiplier: if this is an array it scales each rb_sequence
            by the multiplier
        seed_offset: What to start the seeds at (e.g. if we want to add
            more seeds later)
        align_cliffs: If true adds a barrier across all qubits in
            rb_pattern after each set of elements, not necessarily
            Cliffords (note: aligns after each increment of elements
            including the length multiplier so if the multiplier is [1,3]
            it will barrier after 1 element for the first pattern and 3
            for the second).
        interleaved_gates: A list of gates of elements that will be
            interleaved (for interleaved randomized benchmarking). The
            length of the list would equal the length of the rb_pattern.
        is_purity: True only for purity rb (default is False)
        group_gates: On which group (or gate set) we perform RB
            (default is the Clifford group)
            '0' or None or 'Clifford': Clifford group
            '1' or 'CNOT-Dihedral' or 'Non-Clifford': CNOT-Dihedral group

    Returns:
        A tuple of different fields depending on inputs. The different
        fields are:

         * ``circuits``: list of lists of circuits for the rb sequences
           (separate list for each seed)
         * ``xdata``: the sequences lengths (with multiplier if applicable)
         * ``circuits_interleaved`` `(only if interleaved_gates is not
           None)`: list of lists of circuits for the interleaved rb
           sequences (separate list for each seed)
         * ``circuits_purity`` `(only if is_purity=True)`: list of lists
           of lists of circuits for purity rb (separate list for each seed
           and each of the 3^n circuits)
         * ``npurity`` `(only if is_purity=True)`: the number of purity rb
           circuits (per seed) which equals to 3^n, where n is the
           dimension
    """
    # Set modules (default is Clifford)
    if group_gates is None or group_gates in ('0',
                                              'Clifford',
                                              'clifford'):
        Gutils = clutils()
        Ggroup = Clifford
        rb_circ_type = 'rb'
        group_gates_type = 0
    elif group_gates in ('1', 'Non-Clifford',
                         # comma added below: the original was missing it,
                         # so 'NonClifford' and 'CNOTDihedral' silently
                         # concatenated into one string and neither alias
                         # was ever matched.
                         'NonClifford',
                         'CNOTDihedral',
                         'CNOT-Dihedral'):
        Gutils = dutils()
        Ggroup = CNOTDihedral
        rb_circ_type = 'rb_cnotdihedral'
        group_gates_type = 1
    else:
        raise ValueError("Unknown group or set of gates.")

    if rb_pattern is None:
        rb_pattern = [[0]]
    if length_vector is None:
        length_vector = [1, 10, 20]

    qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)
    length_multiplier = handle_length_multiplier(length_multiplier,
                                                 len(rb_pattern),
                                                 is_purity)
    # number of purity rb circuits per seed
    npurity = 3**max_dim

    xdata = calc_xdata(length_vector, length_multiplier)

    pattern_sizes = [len(pat) for pat in rb_pattern]
    max_nrb = np.max(pattern_sizes)

    # load group tables
    group_tables = [[] for _ in range(max_nrb)]
    for rb_num in range(max_nrb):
        group_tables[rb_num] = Gutils.load_tables(rb_num+1)

    # initialization: rb sequences
    circuits = [[] for e in range(nseeds)]
    # initialization: interleaved rb sequences
    circuits_interleaved = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral rb sequences
    circuits_cnotdihedral = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral interleaved rb sequences
    circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]
    # initialization: purity rb sequences
    circuits_purity = [[[] for d in range(npurity)]
                       for e in range(nseeds)]

    # go through for each seed
    for seed in range(nseeds):
        qr = qiskit.QuantumRegister(n_q_max+1, 'qr')
        cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')
        general_circ = qiskit.QuantumCircuit(qr, cr)
        interleaved_circ = qiskit.QuantumCircuit(qr, cr)

        # make sequences for each of the separate sequences in rb_pattern
        Elmnts = []
        for rb_q_num in pattern_sizes:
            Elmnts.append(Ggroup(rb_q_num))
        # Sequences for interleaved rb sequences
        Elmnts_interleaved = []
        for rb_q_num in pattern_sizes:
            Elmnts_interleaved.append(Ggroup(rb_q_num))

        # go through and add elements to RB sequences
        length_index = 0
        for elmnts_index in range(length_vector[-1]):
            for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                for _ in range(length_multiplier[rb_pattern_index]):
                    new_elmnt_gatelist = Gutils.random_gates(
                        rb_q_num)
                    Elmnts[rb_pattern_index] = Gutils.compose_gates(
                        Elmnts[rb_pattern_index], new_elmnt_gatelist)
                    general_circ += replace_q_indices(
                        get_quantum_circuit(Gutils.gatelist(),
                                            rb_q_num),
                        rb_pattern[rb_pattern_index], qr)

                    # add a barrier
                    general_circ.barrier(
                        *[qr[x] for x in rb_pattern[rb_pattern_index]])

                    # interleaved rb sequences
                    if interleaved_gates is not None:
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                new_elmnt_gatelist)
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                interleaved_gates[rb_pattern_index])
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])

            if align_cliffs:
                # if align at a barrier across all patterns
                general_circ.barrier(
                    *[qr[x] for x in qlist_flat])
                # align for interleaved rb
                if interleaved_gates is not None:
                    interleaved_circ.barrier(
                        *[qr[x] for x in qlist_flat])

            # if the number of elements matches one of the sequence lengths
            # then calculate the inverse and produce the circuit
            if (elmnts_index+1) == length_vector[length_index]:
                # circ for rb:
                circ = qiskit.QuantumCircuit(qr, cr)
                circ += general_circ
                # circ_interleaved for interleaved rb:
                circ_interleaved = qiskit.QuantumCircuit(qr, cr)
                circ_interleaved += interleaved_circ

                for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                    inv_key = Gutils.find_key(Elmnts[rb_pattern_index],
                                              rb_q_num)
                    inv_circuit = Gutils.find_inverse_gates(
                        rb_q_num,
                        group_tables[rb_q_num-1][inv_key])
                    circ += replace_q_indices(
                        get_quantum_circuit(inv_circuit, rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    # calculate the inverse and produce the circuit
                    # for interleaved rb
                    if interleaved_gates is not None:
                        inv_key = Gutils.find_key(Elmnts_interleaved
                                                  [rb_pattern_index],
                                                  rb_q_num)
                        inv_circuit = Gutils.find_inverse_gates(
                            rb_q_num,
                            group_tables[rb_q_num - 1][inv_key])
                        circ_interleaved += replace_q_indices(
                            get_quantum_circuit(inv_circuit, rb_q_num),
                            rb_pattern[rb_pattern_index], qr)

                # Circuits for purity rb
                if is_purity:
                    circ_purity = [[] for d in range(npurity)]
                    for d in range(npurity):
                        circ_purity[d] = qiskit.QuantumCircuit(qr, cr)
                        circ_purity[d] += circ
                        circ_purity[d].name = rb_circ_type + '_purity_'
                        ind_d = d
                        purity_qubit_num = 0
                        while True:
                            # Per each qubit:
                            # do nothing or rx(pi/2) or ry(pi/2)
                            purity_qubit_rot = np.mod(ind_d, 3)
                            ind_d = np.floor_divide(ind_d, 3)
                            if purity_qubit_rot == 0:  # do nothing
                                circ_purity[d].name += 'Z'
                            if purity_qubit_rot == 1:  # add rx(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].rx(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'X'
                            if purity_qubit_rot == 2:  # add ry(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].ry(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'Y'
                            purity_qubit_num = purity_qubit_num + 1
                            if ind_d == 0:
                                break
                        # padding the circuit name with Z's so that
                        # all circuits will have names of the same length
                        for _ in range(max_dim - purity_qubit_num):
                            circ_purity[d].name += 'Z'
                        # add measurement for purity rb
                        for qind, qb in enumerate(qlist_flat):
                            circ_purity[d].measure(qr[qb], cr[qind])
                        circ_purity[d].name += '_length_%d_seed_%d' \
                                               % (length_index,
                                                  seed + seed_offset)

                # add measurement for Non-Clifford cnot-dihedral rb
                # measure both the ground state |0...0> (circ)
                # and the |+...+> state (cnot-dihedral_circ)
                cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)
                cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)
                if group_gates_type == 1:
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                    cnotdihedral_circ += circ
                    cnotdihedral_interleaved_circ += circ_interleaved
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                    for qind, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.measure(qr[qb], cr[qind])
                        cnotdihedral_interleaved_circ.measure(qr[qb],
                                                              cr[qind])

                # add measurement for standard rb
                # qubits measure to the c registers as
                # they appear in the pattern
                for qind, qb in enumerate(qlist_flat):
                    circ.measure(qr[qb], cr[qind])
                    # add measurement for interleaved rb
                    circ_interleaved.measure(qr[qb], cr[qind])

                circ.name = \
                    rb_circ_type + '_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                circ_interleaved.name = \
                    rb_circ_type + '_interleaved_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)

                if group_gates_type == 1:
                    circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    circ_interleaved.name = \
                        rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    cnotdihedral_circ.name = \
                        rb_circ_type + '_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    # leading '_' added: the original produced
                    # 'rb_cnotdihedralinterleaved_X_...', inconsistent with
                    # every other circuit-name pattern in this function.
                    cnotdihedral_interleaved_circ.name = \
                        rb_circ_type + '_interleaved_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)

                circuits[seed].append(circ)
                circuits_interleaved[seed].append(circ_interleaved)
                circuits_cnotdihedral[seed].append(cnotdihedral_circ)
                circuits_cnotdihedral_interleaved[seed].append(
                    cnotdihedral_interleaved_circ)

                if is_purity:
                    for d in range(npurity):
                        circuits_purity[seed][d].append(circ_purity[d])
                length_index += 1

    # output of purity rb
    if is_purity:
        return circuits_purity, xdata, npurity
    # output of non-clifford cnot-dihedral interleaved rb
    if interleaved_gates is not None and group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \
            circuits_cnotdihedral_interleaved
    # output of interleaved rb
    if interleaved_gates is not None:
        return circuits, xdata, circuits_interleaved
    # output of Non-Clifford cnot-dihedral rb
    if group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral
    # output of standard (simultaneous) rb
    return circuits, xdata
def replace_q_indices(circuit, q_nums, qr):
    """
    Take a circuit that is ordered from 0,1,2 qubits and replace 0 with the
    qubit label in the first index of q_nums, 1 with the second index...

    Args:
        circuit: circuit to operate on
        q_nums: list of qubit indices
        qr: QuantumRegister the remapped circuit is built on

    Returns:
        updated circuit
    """
    new_circuit = qiskit.QuantumCircuit(qr)
    for instr, qargs, cargs in circuit.data:
        # map each instruction qubit's positional index through q_nums
        # into the target register
        new_qargs = [
            qr[q_nums[x]] for x in [arg.index for arg in qargs]]
        # deepcopy so the remapped circuit shares no instruction objects
        # with the source circuit
        new_op = copy.deepcopy((instr, new_qargs, cargs))
        new_circuit.data.append(new_op)
    return new_circuit
def get_quantum_circuit(gatelist, num_qubits):
    """
    Returns the circuit in the form of a QuantumCircuit object.

    Args:
        num_qubits: the number of qubits (dimension).
        gatelist: a list of gates, each a string of the form
            '<name> [theta] <qubit indices...>'.

    Returns:
        A QuantumCircuit object.
    """
    qr = qiskit.QuantumRegister(num_qubits)
    qc = qiskit.QuantumCircuit(qr)
    for op in gatelist:
        split = op.split()
        op_names = [split[0]]
        # temporary correcting the ops name since QuantumCircuit has no
        # attributes 'v' or 'w' yet:
        if op_names == ['v']:
            op_names = ['sdg', 'h']
        elif op_names == ['w']:
            op_names = ['h', 's']
        if op_names == ['u1']:
            # u1 carries an angle as its first argument
            qubits = [qr[int(x)] for x in split[2:]]
            theta = float(split[1])
        else:
            qubits = [qr[int(x)] for x in split[1:]]
        for sub_op in op_names:
            # getattr instead of eval: same attribute lookup without
            # executing arbitrary code from the gate-name string
            operation = getattr(qiskit.QuantumCircuit, sub_op)
            if sub_op == 'u1':
                operation(qc, theta, *qubits)
            else:
                operation(qc, *qubits)
    return qc
| 42.217973 | 79 | 0.550634 |
import copy
import numpy as np
import qiskit
from .Clifford import Clifford
from .clifford_utils import CliffordUtils as clutils
from .dihedral import CNOTDihedral
from .dihedral_utils import DihedralUtils as dutils
def handle_length_multiplier(length_multiplier, len_pattern,
is_purity=False):
if hasattr(length_multiplier, "__len__"):
if is_purity:
raise ValueError(
"In case of Purity RB the length multiplier should be None")
if len(length_multiplier) != len_pattern:
raise ValueError(
"Length mulitiplier must be the same length as the pattern")
length_multiplier = np.array(length_multiplier)
if length_multiplier.dtype != 'int' or (length_multiplier < 1).any():
raise ValueError("Invalid length multiplier")
else:
length_multiplier = np.ones(len_pattern, dtype='int')*length_multiplier
return length_multiplier
def check_pattern(pattern, is_purity=False):
pattern_flat = []
pattern_dim = []
for pat in pattern:
pattern_flat.extend(pat)
pattern_dim.append(len(pat))
_, uni_counts = np.unique(np.array(pattern_flat), return_counts=True)
if (uni_counts > 1).any():
raise ValueError("Invalid pattern. Duplicate qubit index.")
dim_distinct = np.unique(pattern_dim)
if is_purity:
if len(dim_distinct) > 1:
raise ValueError("Invalid pattern for purity RB. \
All simultaneous sequences should have the \
same dimension.")
return pattern_flat, np.max(pattern_flat).item(), np.max(pattern_dim)
def calc_xdata(length_vector, length_multiplier):
xdata = []
for mult in length_multiplier:
xdata.append(np.array(length_vector)*mult)
return np.array(xdata)
def randomized_benchmarking_seq(nseeds=1, length_vector=None,
rb_pattern=None,
length_multiplier=1, seed_offset=0,
align_cliffs=False,
interleaved_gates=None,
is_purity=False,
group_gates=None):
if group_gates is None or group_gates in ('0',
'Clifford',
'clifford'):
Gutils = clutils()
Ggroup = Clifford
rb_circ_type = 'rb'
group_gates_type = 0
elif group_gates in ('1', 'Non-Clifford',
'NonClifford'
'CNOTDihedral',
'CNOT-Dihedral'):
Gutils = dutils()
Ggroup = CNOTDihedral
rb_circ_type = 'rb_cnotdihedral'
group_gates_type = 1
else:
raise ValueError("Unknown group or set of gates.")
if rb_pattern is None:
rb_pattern = [[0]]
if length_vector is None:
length_vector = [1, 10, 20]
qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)
length_multiplier = handle_length_multiplier(length_multiplier,
len(rb_pattern),
is_purity)
npurity = 3**max_dim
xdata = calc_xdata(length_vector, length_multiplier)
pattern_sizes = [len(pat) for pat in rb_pattern]
max_nrb = np.max(pattern_sizes)
group_tables = [[] for _ in range(max_nrb)]
for rb_num in range(max_nrb):
group_tables[rb_num] = Gutils.load_tables(rb_num+1)
circuits = [[] for e in range(nseeds)]
circuits_interleaved = [[] for e in range(nseeds)]
circuits_cnotdihedral = [[] for e in range(nseeds)]
circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]
circuits_purity = [[[] for d in range(npurity)]
for e in range(nseeds)]
for seed in range(nseeds):
qr = qiskit.QuantumRegister(n_q_max+1, 'qr')
cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')
general_circ = qiskit.QuantumCircuit(qr, cr)
interleaved_circ = qiskit.QuantumCircuit(qr, cr)
Elmnts = []
for rb_q_num in pattern_sizes:
Elmnts.append(Ggroup(rb_q_num))
Elmnts_interleaved = []
for rb_q_num in pattern_sizes:
Elmnts_interleaved.append(Ggroup(rb_q_num))
length_index = 0
for elmnts_index in range(length_vector[-1]):
for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
for _ in range(length_multiplier[rb_pattern_index]):
new_elmnt_gatelist = Gutils.random_gates(
rb_q_num)
Elmnts[rb_pattern_index] = Gutils.compose_gates(
Elmnts[rb_pattern_index], new_elmnt_gatelist)
general_circ += replace_q_indices(
get_quantum_circuit(Gutils.gatelist(),
rb_q_num),
rb_pattern[rb_pattern_index], qr)
general_circ.barrier(
*[qr[x] for x in rb_pattern[rb_pattern_index]])
if interleaved_gates is not None:
Elmnts_interleaved[rb_pattern_index] = \
Gutils.compose_gates(
Elmnts_interleaved[rb_pattern_index],
new_elmnt_gatelist)
interleaved_circ += replace_q_indices(
get_quantum_circuit(Gutils.gatelist(),
rb_q_num),
rb_pattern[rb_pattern_index], qr)
Elmnts_interleaved[rb_pattern_index] = \
Gutils.compose_gates(
Elmnts_interleaved[rb_pattern_index],
interleaved_gates[rb_pattern_index])
interleaved_circ.barrier(
*[qr[x] for x in rb_pattern[rb_pattern_index]])
interleaved_circ += replace_q_indices(
get_quantum_circuit(Gutils.gatelist(),
rb_q_num),
rb_pattern[rb_pattern_index], qr)
interleaved_circ.barrier(
*[qr[x] for x in rb_pattern[rb_pattern_index]])
if align_cliffs:
general_circ.barrier(
*[qr[x] for x in qlist_flat])
if interleaved_gates is not None:
interleaved_circ.barrier(
*[qr[x] for x in qlist_flat])
if (elmnts_index+1) == length_vector[length_index]:
circ = qiskit.QuantumCircuit(qr, cr)
circ += general_circ
circ_interleaved = qiskit.QuantumCircuit(qr, cr)
circ_interleaved += interleaved_circ
for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
inv_key = Gutils.find_key(Elmnts[rb_pattern_index],
rb_q_num)
inv_circuit = Gutils.find_inverse_gates(
rb_q_num,
group_tables[rb_q_num-1][inv_key])
circ += replace_q_indices(
get_quantum_circuit(inv_circuit, rb_q_num),
rb_pattern[rb_pattern_index], qr)
if interleaved_gates is not None:
inv_key = Gutils.find_key(Elmnts_interleaved
[rb_pattern_index],
rb_q_num)
inv_circuit = Gutils.find_inverse_gates(
rb_q_num,
group_tables[rb_q_num - 1][inv_key])
circ_interleaved += replace_q_indices(
get_quantum_circuit(inv_circuit, rb_q_num),
rb_pattern[rb_pattern_index], qr)
if is_purity:
circ_purity = [[] for d in range(npurity)]
for d in range(npurity):
circ_purity[d] = qiskit.QuantumCircuit(qr, cr)
circ_purity[d] += circ
circ_purity[d].name = rb_circ_type + '_purity_'
ind_d = d
purity_qubit_num = 0
while True:
purity_qubit_rot = np.mod(ind_d, 3)
ind_d = np.floor_divide(ind_d, 3)
if purity_qubit_rot == 0:
circ_purity[d].name += 'Z'
if purity_qubit_rot == 1:
for pat in rb_pattern:
circ_purity[d].rx(np.pi / 2,
qr[pat[
purity_qubit_num]])
circ_purity[d].name += 'X'
if purity_qubit_rot == 2:
for pat in rb_pattern:
circ_purity[d].ry(np.pi / 2,
qr[pat[
purity_qubit_num]])
circ_purity[d].name += 'Y'
purity_qubit_num = purity_qubit_num + 1
if ind_d == 0:
break
# all circuits will have names of the same length
for _ in range(max_dim - purity_qubit_num):
circ_purity[d].name += 'Z'
# add measurement for purity rb
for qind, qb in enumerate(qlist_flat):
circ_purity[d].measure(qr[qb], cr[qind])
circ_purity[d].name += '_length_%d_seed_%d' \
% (length_index,
seed + seed_offset)
# add measurement for Non-Clifford cnot-dihedral rb
# measure both the ground state |0...0> (circ)
# and the |+...+> state (cnot-dihedral_circ)
cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)
cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)
if group_gates_type == 1:
for _, qb in enumerate(qlist_flat):
cnotdihedral_circ.h(qr[qb])
cnotdihedral_circ.barrier(qr[qb])
cnotdihedral_interleaved_circ.h(qr[qb])
cnotdihedral_interleaved_circ.barrier(qr[qb])
cnotdihedral_circ += circ
cnotdihedral_interleaved_circ += circ_interleaved
for _, qb in enumerate(qlist_flat):
cnotdihedral_circ.barrier(qr[qb])
cnotdihedral_circ.h(qr[qb])
cnotdihedral_interleaved_circ.barrier(qr[qb])
cnotdihedral_interleaved_circ.h(qr[qb])
for qind, qb in enumerate(qlist_flat):
cnotdihedral_circ.measure(qr[qb], cr[qind])
cnotdihedral_interleaved_circ.measure(qr[qb], cr[qind])
# add measurement for standard rb
# qubits measure to the c registers as
# they appear in the pattern
for qind, qb in enumerate(qlist_flat):
circ.measure(qr[qb], cr[qind])
# add measurement for interleaved rb
circ_interleaved.measure(qr[qb], cr[qind])
circ.name = \
rb_circ_type + '_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
circ_interleaved.name = \
rb_circ_type + '_interleaved_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
if group_gates_type == 1:
circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
circ_interleaved.name = \
rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
cnotdihedral_circ.name = \
rb_circ_type + '_X_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
cnotdihedral_interleaved_circ.name = \
rb_circ_type + 'interleaved_X_length_%d_seed_%d' % \
(length_index, seed + seed_offset)
circuits[seed].append(circ)
circuits_interleaved[seed].append(circ_interleaved)
circuits_cnotdihedral[seed].append(cnotdihedral_circ)
circuits_cnotdihedral_interleaved[seed].append(
cnotdihedral_interleaved_circ)
if is_purity:
for d in range(npurity):
circuits_purity[seed][d].append(circ_purity[d])
length_index += 1
# output of purity rb
if is_purity:
return circuits_purity, xdata, npurity
# output of non-clifford cnot-dihedral interleaved rb
if interleaved_gates is not None and group_gates_type == 1:
return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \
circuits_cnotdihedral_interleaved
# output of interleaved rb
if interleaved_gates is not None:
return circuits, xdata, circuits_interleaved
# output of Non-Clifford cnot-dihedral rb
if group_gates_type == 1:
return circuits, xdata, circuits_cnotdihedral
# output of standard (simultaneous) rb
return circuits, xdata
def replace_q_indices(circuit, q_nums, qr):
new_circuit = qiskit.QuantumCircuit(qr)
for instr, qargs, cargs in circuit.data:
new_qargs = [
qr[q_nums[x]] for x in [arg.index for arg in qargs]]
new_op = copy.deepcopy((instr, new_qargs, cargs))
new_circuit.data.append(new_op)
return new_circuit
def get_quantum_circuit(gatelist, num_qubits):
qr = qiskit.QuantumRegister(num_qubits)
qc = qiskit.QuantumCircuit(qr)
for op in gatelist:
split = op.split()
op_names = [split[0]]
# temporary correcting the ops name since QuantumCircuit has no
# attributes 'v' or 'w' yet:
if op_names == ['v']:
op_names = ['sdg', 'h']
elif op_names == ['w']:
op_names = ['h', 's']
if op_names == ['u1']:
qubits = [qr[int(x)] for x in split[2:]]
theta = float(split[1])
else:
qubits = [qr[int(x)] for x in split[1:]]
for sub_op in op_names:
operation = eval('qiskit.QuantumCircuit.' + sub_op)
if sub_op == 'u1':
operation(qc, theta, *qubits)
else:
operation(qc, *qubits)
return qc
| true | true |
1c4748aa711a339da4d0853a24e1a562118a999c | 1,347 | py | Python | bokchoy/utils/log.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | bokchoy/utils/log.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | bokchoy/utils/log.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | import six
import logging
class NullHandler(logging.Handler):
    """Handler that silently discards every record (pre-2.7 stdlib shim)."""

    def emit(self, record):
        # Intentionally a no-op: records attached here are dropped.
        return None
def logger_isa(l, p, max=1000):
    """Return True if logger ``p`` is ``l`` itself or one of its ancestors.

    Walks the ``parent`` chain of ``l`` for at most ``max`` steps,
    raising RuntimeError on a cycle or when the hierarchy is deeper
    than ``max``.
    """
    seen = set()
    node = l
    steps = 0
    while node:
        if node == p:
            return True
        if node in seen:
            raise RuntimeError(
                'Logger {0!r} parents recursive'.format(l),
            )
        seen.add(node)
        node = node.parent
        steps += 1
        if steps >= max and node:
            raise RuntimeError('Logger hierarchy exceeds {0}'.format(max))
    return False
def _get_logger(logger):
    """Resolve *logger* (a name or a Logger) to a Logger with a handler.

    A NullHandler is attached when the logger has none, so emitting
    through it never triggers the "no handlers could be found" warning.
    """
    resolved = (logging.getLogger(logger)
                if isinstance(logger, six.string_types) else logger)
    if not resolved.handlers:
        resolved.addHandler(NullHandler())
    return resolved
def get_logger(name):
    """Return the named logger, reparented under the bokchoy base logger.

    A logger that hangs directly off the root (and is not already the
    base logger or one of its descendants) gets ``base_logger`` as its
    parent, so all records propagate through the package hierarchy.
    """
    log = _get_logger(name)
    is_rootish = logging.root in (log, log.parent)
    if not is_rootish and log is not base_logger \
            and not logger_isa(log, base_logger):  # pragma: no cover
        log.parent = base_logger
    return log
# Package-level base logger; every bokchoy logger is parented under it.
base_logger = logger = _get_logger('bokchoy')
# Dedicated child loggers for task execution and worker lifecycle messages.
task_logger = get_logger('bokchoy.task')
worker_logger = get_logger('bokchoy.worker')
def get_task_logger(name):
    """Return the named logger, guaranteed to live under ``bokchoy.task``."""
    task_log = get_logger(name)
    if not logger_isa(task_log, task_logger):
        task_log.parent = task_logger
    return task_log
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
def logger_isa(l, p, max=1000):
this, seen = l, set()
for _ in range(max):
if this == p:
return True
else:
if this in seen:
raise RuntimeError(
'Logger {0!r} parents recursive'.format(l),
)
seen.add(this)
this = this.parent
if not this:
break
else:
raise RuntimeError('Logger hierarchy exceeds {0}'.format(max))
return False
def _get_logger(logger):
if isinstance(logger, six.string_types):
logger = logging.getLogger(logger)
if not logger.handlers:
logger.addHandler(NullHandler())
return logger
def get_logger(name):
l = _get_logger(name)
if logging.root not in (l, l.parent) and l is not base_logger:
if not logger_isa(l, base_logger):
l.parent = base_logger
return l
base_logger = logger = _get_logger('bokchoy')
task_logger = get_logger('bokchoy.task')
worker_logger = get_logger('bokchoy.worker')
def get_task_logger(name):
logger = get_logger(name)
if not logger_isa(logger, task_logger):
logger.parent = task_logger
return logger
| true | true |
1c474bb0722209c98d256697379ddc9a21064447 | 14,683 | py | Python | salt/cloud/clouds/vultrpy.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | salt/cloud/clouds/vultrpy.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | salt/cloud/clouds/vultrpy.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
Vultr Cloud Module using python-vultr bindings
==============================================
.. versionadded:: 2016.3.0
The Vultr cloud module is used to control access to the Vultr VPS system.
Use of this module only requires the ``api_key`` parameter.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/vultr.conf``:
.. code-block:: yaml
my-vultr-config:
# Vultr account api key
api_key: <supersecretapi_key>
driver: vultr
Set up the cloud profile at ``/etc/salt/cloud.profiles`` or
``/etc/salt/cloud.profiles.d/vultr.conf``:
.. code-block:: yaml
nyc-4gb-4cpu-ubuntu-14-04:
location: 1
provider: my-vultr-config
image: 160
size: 95
enable_private_network: True
This driver also supports Vultr's `startup script` feature. You can list startup
scripts in your account with
.. code-block:: bash
salt-cloud -f list_scripts <name of vultr provider>
That list will include the IDs of the scripts in your account. Thus, if you
have a script called 'setup-networking' with an ID of 493234 you can specify
that startup script in a profile like so:
.. code-block:: yaml
nyc-2gb-1cpu-ubuntu-17-04:
location: 1
provider: my-vultr-config
image: 223
size: 13
startup_script_id: 493234
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode # pylint: disable=E0611
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'vultr'
DETAILS = {}
def __virtual__():
    '''
    Set up the Vultr functions and check for configurations

    Salt loader hook: expose this cloud driver only when a vultr provider
    with the required ``api_key`` is configured.
    '''
    if get_configured_provider() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance

    Looks up the active provider (or the default ``vultr`` name) in
    ``__opts__`` and requires the ``api_key`` setting to be present.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or 'vultr',
        ('api_key',)
    )
def _cache_provider_details(conn=None):
    '''
    Provide a place to hang onto results of --list-[locations|sizes|images]
    so we don't have to go out to the API and get them every time.

    Each entry is indexed twice: once by its human-readable name and once
    by its API id, so ``_lookup_vultrid`` can resolve either form.
    '''
    for cache_key in ('avail_locations', 'avail_sizes', 'avail_images'):
        DETAILS[cache_key] = {}
    listings = (
        ('avail_locations', avail_locations(conn)),
        ('avail_images', avail_images(conn)),
        ('avail_sizes', avail_sizes(conn)),
    )
    for cache_key, listing in listings:
        bucket = DETAILS[cache_key]
        for ident, item in six.iteritems(listing):
            bucket[item['name']] = item
            bucket[ident] = item
def avail_locations(conn=None):
    '''
    return available datacenter locations

    Thin wrapper over the Vultr ``regions/list`` endpoint; *conn* is
    accepted for salt-cloud interface compatibility but unused.
    '''
    return _query('regions/list')
def avail_scripts(conn=None):
    '''
    return available startup scripts

    Thin wrapper over the Vultr ``startupscript/list`` endpoint; *conn*
    is accepted for interface compatibility but unused.
    '''
    return _query('startupscript/list')
def list_scripts(conn=None, call=None):
    '''
    return list of Startup Scripts

    CLI alias for :func:`avail_scripts`, reachable via
    ``salt-cloud -f list_scripts <provider>``.
    '''
    return avail_scripts()
def avail_sizes(conn=None):
    '''
    Return available sizes ("plans" in VultrSpeak)

    Thin wrapper over the Vultr ``plans/list`` endpoint.
    '''
    return _query('plans/list')
def avail_images(conn=None):
    '''
    Return available images

    Thin wrapper over the Vultr ``os/list`` endpoint.
    '''
    return _query('os/list')
def list_nodes(**kwargs):
    '''
    Return basic data on nodes

    Trims the full node listing down to the handful of fields salt-cloud
    expects from a basic query.
    '''
    wanted = ('id', 'image', 'size', 'state', 'private_ips', 'public_ips')
    return {
        label: {field: details[field] for field in wanted}
        for label, details in list_nodes_full().items()
    }
def list_nodes_full(**kwargs):
    '''
    Return all data on nodes

    Re-keys the raw ``server/list`` payload by instance label and adds the
    salt-cloud standard fields (id/image/size/state/private_ips/public_ips)
    derived from the Vultr-specific ones.
    '''
    raw = _query('server/list')
    ret = {}
    for subid, info in raw.items():
        entry = info.copy()
        entry['id'] = subid
        entry['image'] = info['os']
        entry['size'] = info['VPSPLANID']
        entry['state'] = info['status']
        entry['private_ips'] = info['internal_ip']
        entry['public_ips'] = info['main_ip']
        ret[info['label']] = entry
    return ret
def list_nodes_select(conn=None, call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields

    Field selection is driven by the ``query.selection`` option in
    ``__opts__``; delegates to the shared cloud utility.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def destroy(name):
    '''
    Remove a node from Vultr

    Resolves the node's SUBID via :func:`show_instance` and POSTs it to
    the ``server/destroy`` endpoint.  Returns ``True`` on success,
    otherwise the raw query result.
    '''
    node = show_instance(name, call='action')
    params = {'SUBID': node['SUBID']}
    result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params))
    # The return of a destroy call is empty in the case of a success.
    # Errors are only indicated via HTTP status code. Status code 200
    # effectively therefore means "success".
    if result.get('body') == '' and result.get('text') == '':
        return True
    return result
def stop(*args, **kwargs):
    '''
    Execute a "stop" action on a VM

    NOTE(review): unlike :func:`destroy`, no SUBID is passed to the
    ``server/halt`` endpoint, so the request does not identify which VM
    to halt -- this looks incomplete; verify against the Vultr API.
    '''
    return _query('server/halt')
def start(*args, **kwargs):
    '''
    Execute a "start" action on a VM

    NOTE(review): unlike :func:`destroy`, no SUBID is passed to the
    ``server/start`` endpoint, so the request does not identify which VM
    to start -- this looks incomplete; verify against the Vultr API.
    '''
    return _query('server/start')
def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance

    Must be invoked as an action (``salt-cloud -a show_instance <name>``).
    Returns the node's full data dict, or ``{}`` when the name is unknown.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    nodes = list_nodes_full()
    # Find under which cloud service the name is listed, if any
    if name not in nodes:
        return {}
    # Refresh the local salt-cloud node cache with the fresh data.
    __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
    return nodes[name]
def _lookup_vultrid(which_key, availkey, keyname):
    '''
    Helper function to retrieve a Vultr ID

    Resolves *which_key* (name or id) through the ``DETAILS`` cache
    section *availkey*, returning the *keyname* field of the match, or
    ``False`` when nothing matches.  Populates the cache on first use.
    '''
    if not DETAILS:
        _cache_provider_details()
    lookup = six.text_type(which_key)
    try:
        return DETAILS[availkey][lookup][keyname]
    except KeyError:
        return False
def create(vm_):
    '''
    Create a single VM from a data dict

    ``vm_`` is the merged profile/provider data salt-cloud builds for one
    instance.  The flow: validate configuration, fire the "creating"
    event, resolve image/size/location names to Vultr numeric ids, POST
    ``server/create``, poll until the box has an IP, a default password,
    active status and an "ok" server state, bootstrap it with salt, and
    fire the "created" event.  Returns the instance data dict on success
    or ``False`` on any validation/API failure.
    '''
    if 'driver' not in vm_:
        vm_['driver'] = vm_['provider']
    private_networking = config.get_cloud_config_value(
        'enable_private_network', vm_, __opts__, search_global=False, default=False,
    )
    startup_script = config.get_cloud_config_value(
        'startup_script_id', vm_, __opts__, search_global=False, default=None,
    )
    # The startup script must already exist in the account;
    # avail_scripts() returns a mapping keyed by script id.
    if startup_script and str(startup_script) not in avail_scripts():
        log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
        return False
    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
    # The Vultr API expects literal 'yes'/'no' strings, not booleans.
    if private_networking is True:
        enable_private_network = 'yes'
    else:
        enable_private_network = 'no'
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Resolve human-readable image/size/location to Vultr numeric ids.
    osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
    if not osid:
        log.error('Vultr does not have an image with id or name %s', vm_['image'])
        return False
    vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
    if not vpsplanid:
        log.error('Vultr does not have a size with id or name %s', vm_['size'])
        return False
    dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
    if not dcid:
        log.error('Vultr does not have a location with id or name %s', vm_['location'])
        return False
    kwargs = {
        'label': vm_['name'],
        'OSID': osid,
        'VPSPLANID': vpsplanid,
        'DCID': dcid,
        'hostname': vm_['name'],
        'enable_private_network': enable_private_network,
    }
    if startup_script:
        kwargs['SCRIPTID'] = startup_script
    log.info('Creating Cloud VM %s', vm_['name'])
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'],
    )
    try:
        data = _query('server/create', method='POST', data=_urlencode(kwargs))
        if int(data.get('status', '200')) >= 300:
            log.error(
                'Error creating %s on Vultr\n\n'
                'Vultr API returned %s\n', vm_['name'], data
            )
            log.error('Status 412 may mean that you are requesting an\n'
                      'invalid location, image, or size.')
            __utils__['cloud.fire_event'](
                'event',
                'instance request failed',
                'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
                args={'kwargs': kwargs},
                sock_dir=__opts__['sock_dir'],
                transport=__opts__['transport'],
            )
            return False
    except Exception as exc:  # pylint: disable=broad-except
        log.error(
            'Error creating %s on Vultr\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment:\n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        __utils__['cloud.fire_event'](
            'event',
            'instance request failed',
            'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
            args={'kwargs': kwargs},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport'],
        )
        return False

    def wait_for_hostname():
        '''
        Poll until the instance has a public IP assigned; a main_ip
        starting with '0' means "not ready yet" (sleeps 3s between polls).
        '''
        data = show_instance(vm_['name'], call='action')
        main_ip = six.text_type(data.get('main_ip', '0'))
        if main_ip.startswith('0'):
            time.sleep(3)
            return False
        return data['main_ip']

    def wait_for_default_password():
        '''
        Poll until the API exposes the instance's default root password.
        '''
        data = show_instance(vm_['name'], call='action')
        if six.text_type(data.get('default_password', '')) == '':
            time.sleep(1)
            return False
        return data['default_password']

    def wait_for_status():
        '''
        Poll until the instance status reads 'active'.
        '''
        data = show_instance(vm_['name'], call='action')
        if six.text_type(data.get('status', '')) != 'active':
            time.sleep(1)
            return False
        return data['default_password']

    def wait_for_server_state():
        '''
        Poll until the instance server_state reads 'ok'.
        '''
        data = show_instance(vm_['name'], call='action')
        if six.text_type(data.get('server_state', '')) != 'ok':
            time.sleep(1)
            return False
        return data['default_password']

    vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
        wait_for_hostname,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    vm_['password'] = __utils__['cloud.wait_for_fun'](
        wait_for_default_password,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    __utils__['cloud.wait_for_fun'](
        wait_for_status,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    __utils__['cloud.wait_for_fun'](
        wait_for_server_state,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    __opts__['hard_timeout'] = config.get_cloud_config_value(
        'hard_timeout',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=None,
    )
    # Bootstrap: deploy salt onto the new instance over SSH.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(show_instance(vm_['name'], call='action'))
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
    '''
    Perform a query directly against the Vultr REST API

    Builds ``https://<management_host>/v1/<path>?api_key=...`` from the
    provider configuration and delegates to the shared ``http.query``
    utility.  Returns the decoded JSON dict when available, otherwise the
    raw query result.  The api_key is hidden from logged output.
    '''
    api_key = config.get_cloud_config_value(
        'api_key',
        get_configured_provider(),
        __opts__,
        search_global=False,
    )
    management_host = config.get_cloud_config_value(
        'management_host',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default='api.vultr.com'
    )
    url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format(
        management_host=management_host,
        path=path,
        api_key=api_key,
    )
    if header_dict is None:
        header_dict = {}
    result = __utils__['http.query'](
        url,
        method=method,
        params=params,
        data=data,
        header_dict=header_dict,
        port=443,
        text=True,
        decode=decode,
        decode_type='json',
        hide_fields=['api_key'],
        opts=__opts__,
    )
    if 'dict' in result:
        return result['dict']
    return result
| 28.236538 | 105 | 0.60914 |
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
import time
import salt.config as config
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit
)
log = logging.getLogger(__name__)
__virtualname__ = 'vultr'
DETAILS = {}
def __virtual__():
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'vultr',
('api_key',)
)
def _cache_provider_details(conn=None):
DETAILS['avail_locations'] = {}
DETAILS['avail_sizes'] = {}
DETAILS['avail_images'] = {}
locations = avail_locations(conn)
images = avail_images(conn)
sizes = avail_sizes(conn)
for key, location in six.iteritems(locations):
DETAILS['avail_locations'][location['name']] = location
DETAILS['avail_locations'][key] = location
for key, image in six.iteritems(images):
DETAILS['avail_images'][image['name']] = image
DETAILS['avail_images'][key] = image
for key, vm_size in six.iteritems(sizes):
DETAILS['avail_sizes'][vm_size['name']] = vm_size
DETAILS['avail_sizes'][key] = vm_size
def avail_locations(conn=None):
return _query('regions/list')
def avail_scripts(conn=None):
return _query('startupscript/list')
def list_scripts(conn=None, call=None):
return avail_scripts()
def avail_sizes(conn=None):
return _query('plans/list')
def avail_images(conn=None):
return _query('os/list')
def list_nodes(**kwargs):
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {}
for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips':
ret[node][prop] = nodes[node][prop]
return ret
def list_nodes_full(**kwargs):
nodes = _query('server/list')
ret = {}
for node in nodes:
name = nodes[node]['label']
ret[name] = nodes[node].copy()
ret[name]['id'] = node
ret[name]['image'] = nodes[node]['os']
ret[name]['size'] = nodes[node]['VPSPLANID']
ret[name]['state'] = nodes[node]['status']
ret[name]['private_ips'] = nodes[node]['internal_ip']
ret[name]['public_ips'] = nodes[node]['main_ip']
return ret
def list_nodes_select(conn=None, call=None):
return __utils__['cloud.list_nodes_select'](
list_nodes_full(), __opts__['query.selection'], call,
)
def destroy(name):
node = show_instance(name, call='action')
params = {'SUBID': node['SUBID']}
result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params))
if result.get('body') == '' and result.get('text') == '':
return True
return result
def stop(*args, **kwargs):
return _query('server/halt')
def start(*args, **kwargs):
return _query('server/start')
def show_instance(name, call=None):
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
if name not in nodes:
return {}
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def _lookup_vultrid(which_key, availkey, keyname):
if DETAILS == {}:
_cache_provider_details()
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False
def create(vm_):
if 'driver' not in vm_:
vm_['driver'] = vm_['provider']
private_networking = config.get_cloud_config_value(
'enable_private_network', vm_, __opts__, search_global=False, default=False,
)
startup_script = config.get_cloud_config_value(
'startup_script_id', vm_, __opts__, search_global=False, default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
return False
if private_networking is not None:
if not isinstance(private_networking, bool):
raise SaltCloudConfigError("'private_networking' should be a boolean value.")
if private_networking is True:
enable_private_network = 'yes'
else:
enable_private_network = 'no'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
'label': vm_['name'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
if startup_script:
kwargs['SCRIPTID'] = startup_script
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
except Exception as exc:
log.error(
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n%s',
vm_['name'], exc,
exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
data = show_instance(vm_['name'], call='action')
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
return data['main_ip']
def wait_for_default_password():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
def wait_for_status():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
def wait_for_server_state():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
vm_['password'] = __utils__['cloud.wait_for_fun'](
wait_for_default_password,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_status,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_server_state,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__opts__['hard_timeout'] = config.get_cloud_config_value(
'hard_timeout',
get_configured_provider(),
__opts__,
search_global=False,
default=None,
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
api_key = config.get_cloud_config_value(
'api_key',
get_configured_provider(),
__opts__,
search_global=False,
)
management_host = config.get_cloud_config_value(
'management_host',
get_configured_provider(),
__opts__,
search_global=False,
default='api.vultr.com'
)
url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format(
management_host=management_host,
path=path,
api_key=api_key,
)
if header_dict is None:
header_dict = {}
result = __utils__['http.query'](
url,
method=method,
params=params,
data=data,
header_dict=header_dict,
port=443,
text=True,
decode=decode,
decode_type='json',
hide_fields=['api_key'],
opts=__opts__,
)
if 'dict' in result:
return result['dict']
return result
| true | true |
1c474bcbae3af33fdc44d18a2aa1c4f0fe87dcdd | 7,974 | py | Python | scripts/process_perspective.py | dbckz/crossing-the-line | c5debb20e263e03eab9188ce7229753034939964 | [
"MIT"
] | 1 | 2022-02-14T17:11:30.000Z | 2022-02-14T17:11:30.000Z | scripts/process_perspective.py | dbckz/crossing-the-line | c5debb20e263e03eab9188ce7229753034939964 | [
"MIT"
] | null | null | null | scripts/process_perspective.py | dbckz/crossing-the-line | c5debb20e263e03eab9188ce7229753034939964 | [
"MIT"
] | null | null | null | """
Script to evaluate tweets against the Perspective API
How it's used:
* Loads "tweets.csv" files according to 'root_path' and 'day_paths' vars
* Sends one tweet at a time to the API
* Sleeps for 1 second between requests due to API rate-limit
* Appends results to perspective_processed_tweets.csv after every 50 tweets, so that not all progress is lost if the
script were to die midway through processing a file
"""
import os
import time
import numpy as np
import pandas as pd
from googleapiclient import discovery
def get_perspective_client(api_key):
    """Build a googleapiclient resource for the Perspective commentanalyzer API.

    *api_key* is a Google API key with Perspective access; the returned
    client exposes ``client.comments().analyze(...)``.
    """
    return discovery.build(
        "commentanalyzer",
        "v1alpha1",
        developerKey=api_key,
        discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
        static_discovery=False,
    )
def query_perspective(client, text, tweet_id, logfile):
    """Score *text* against six Perspective API attributes.

    Args:
        client: googleapiclient resource for ``commentanalyzer`` (see
            ``get_perspective_client``).
        text: the tweet text to score.
        tweet_id: used only in error-log messages.
        logfile: path of the error log appended to on failure.

    Returns:
        dict with the six ``*_score`` values plus an ``error`` key.  On a
        rate-limit error the call sleeps and retries; on any other
        failure every score is -1 and ``error`` is ``"ERROR"``.
    """
    analyze_request = {
        'comment': {
            'text': text
        },
        'requestedAttributes': {
            'TOXICITY': {},
            'SEVERE_TOXICITY': {},
            'IDENTITY_ATTACK': {},
            'INSULT': {},
            'THREAT': {},
            'SEXUALLY_EXPLICIT': {}
        }
    }
    try:
        response = client.comments().analyze(body=analyze_request).execute()
        scores = response['attributeScores']
        return {
            "toxicity_score": scores['TOXICITY']['summaryScore']['value'],
            "severe_toxicity_score": scores['SEVERE_TOXICITY']['summaryScore']['value'],
            "identity_attack_score": scores['IDENTITY_ATTACK']['summaryScore']['value'],
            "insult_score": scores['INSULT']['summaryScore']['value'],
            "threat_score": scores['THREAT']['summaryScore']['value'],
            "sexually_explicit_score": scores['SEXUALLY_EXPLICIT']['summaryScore']['value'],
            "error": ""
        }
    except Exception as e:
        with open(logfile, 'a') as f:
            f.write(f"{time.ctime()}: EXCEPTION. Tweet Id: {tweet_id}: {e}")
            f.write('\n')
        print(f"EXCEPTION. Tweet Id: {tweet_id}: {e}")
        # BUG FIX: only googleapiclient HttpError carries ``error_details``;
        # guard the attribute access so a transport/value error cannot raise
        # AttributeError inside this handler.
        details = getattr(e, 'error_details', None) or []
        rate_limited = (bool(details)
                        and isinstance(details[0], dict)
                        and details[0].get('reason') == 'RATE_LIMIT_EXCEEDED')
        if rate_limited:
            sleeptime = 70
            with open(logfile, 'a') as f:
                f.write(f"{time.ctime()}: Sleeping for {sleeptime} seconds")
                f.write('\n')
            print(f"Sleeping for {sleeptime} seconds")
            # BUG FIX: sleep for the logged ``sleeptime`` value instead of a
            # separately hard-coded 70.
            time.sleep(sleeptime)
            return query_perspective(client, text, tweet_id, logfile)
        return {
            "toxicity_score": -1,
            "severe_toxicity_score": -1,
            "identity_attack_score": -1,
            "insult_score": -1,
            "threat_score": -1,
            "sexually_explicit_score": -1,
            "error": "ERROR"
        }
def process_tweet(tweet, perspective_client, output_dataframe, logfile):
    """Score one tweet via Perspective and append its row to *output_dataframe*.

    The row is indexed by ``tweet_id`` and matches the output CSV column
    order (id, six scores, error flag).
    """
    scores = query_perspective(perspective_client, tweet['tweet_text'], tweet['tweet_id'], logfile)
    row = [tweet['tweet_id']]
    row.extend(scores[key] for key in (
        'toxicity_score',
        'severe_toxicity_score',
        'identity_attack_score',
        'insult_score',
        'threat_score',
        'sexually_explicit_score',
        'error',
    ))
    output_dataframe.loc[tweet['tweet_id']] = row
def process_day(directory):
    """Run Perspective scoring for every row of ``<directory>/tweets.csv``.

    Reads the input CSV in chunks of 50 rows, scores each tweet via the
    module-level ``perspective_client``, and appends results to
    ``<directory>/perspective_processed_tweets.csv`` after every chunk so
    a crash mid-file loses at most one chunk of work.  Progress and
    errors are appended to log files in the same directory.
    """
    logfile = directory + "/perspective_error_log.txt"
    progress_logfile = directory + "/perspective_progress_log.txt"

    def _log_progress(msg):
        # Mirror every progress message to the log file and stdout.
        with open(progress_logfile, 'a') as f:
            f.write(f"{time.ctime()}: {msg}")
            f.write('\n')
        print(msg)

    _log_progress(f"Starting processing for {directory}")

    in_csv = directory + "/tweets.csv"
    out_csv = directory + "/perspective_processed_tweets.csv"

    # Delete existing output file if it exists (results are appended).
    if os.path.exists(out_csv):
        os.remove(out_csv)

    # Count lines with the handle closed afterwards (previously leaked).
    with open(in_csv) as f:
        number_lines = sum(1 for _ in f)
    chunk_size = 50
    tweets_remaining = number_lines - 1  # minus the header row
    _log_progress(f"Number of tweets: {tweets_remaining}")

    for i in range(0, number_lines, chunk_size):
        start = time.time()
        # BUG FIX: skiprows must cover all i data rows already read --
        # range(1, i + 1), not range(1, i).  The old bound skipped one row
        # too few, so the last row of the first chunk was re-read by the
        # second chunk and duplicated in the output (and re-scored).
        in_tweets = pd.read_csv(in_csv,
                                header=0,
                                nrows=chunk_size,
                                skiprows=range(1, i + 1))
        if i == 0:
            print(f"Loaded first {len(in_tweets.index)} tweets.")

        out_tweets = pd.DataFrame(
            columns=["tweet_id", "toxicity_score", "severe_toxicity_score", "identity_attack_score", "insult_score",
                     "threat_score", "sexually_explicit_score", "error"])

        # Score each tweet; sleep for the 1 req/second Perspective limit.
        for _, row in in_tweets.iterrows():
            process_tweet(row, perspective_client, out_tweets, logfile)
            time.sleep(1)

        # Ensure tweet_id is written as an int (not float/object).
        new_dtypes = {
            "tweet_id": int,
            "toxicity_score": np.float64,
            "severe_toxicity_score": np.float64,
            "identity_attack_score": np.float64,
            "insult_score": np.float64,
            "threat_score": np.float64,
            "sexually_explicit_score": np.float64,
            "error": str
        }
        out_tweets = out_tweets.astype(new_dtypes)

        out_tweets.to_csv(out_csv,
                          index=False,
                          header=(i == 0),  # column header only once
                          mode='a',  # append data to csv file
                          chunksize=chunk_size)

        tweets_remaining = tweets_remaining - len(out_tweets.index)
        _log_progress(f"Processed {len(out_tweets.index)} tweets in {time.time() - start} seconds. "
                      f"{tweets_remaining} tweets remaining.")

    _log_progress(f"Completed processing for {directory}")
if __name__ == "__main__":
root_path = "/Users/davebuckley/Documents/Kings/Dissertation/dissertation/data_collection"
day_paths = [
"/01",
"/02",
"/03",
"/04",
"/05",
"/06",
"/07",
"/08",
"/09",
"/10",
"/11",
"/12",
"/13",
"/14",
"/15",
"/16",
"/17",
"/18",
"/19",
"/20",
"/21",
"/22",
"/23",
"/24",
"/25",
"/26",
"/27",
"/28",
"/29",
"/30",
"/31",
"/32",
"/33",
"/34",
"/35",
"/36"
]
# Auth to Perspective API
print("Connecting to Perspective API")
API_KEY = os.getenv("PERSPECTIVE_API_KEY")
perspective_client = get_perspective_client(API_KEY)
print("Connected to Perspective API")
for day in day_paths:
process_day(root_path + day)
print("All completed")
| 34.37069 | 128 | 0.568096 | import os
import time
import numpy as np
import pandas as pd
from googleapiclient import discovery
def get_perspective_client(api_key):
return discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=api_key,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
def query_perspective(client, text, tweet_id, logfile):
analyze_request = {
'comment': {
'text': text
},
'requestedAttributes': {
'TOXICITY': {},
'SEVERE_TOXICITY': {},
'IDENTITY_ATTACK': {},
'INSULT': {},
'THREAT': {},
'SEXUALLY_EXPLICIT': {}
}
}
try:
response = client.comments().analyze(body=analyze_request).execute()
toxicity_score = response['attributeScores']['TOXICITY']['summaryScore']['value']
severe_toxicity_score = response['attributeScores']['SEVERE_TOXICITY']['summaryScore']['value']
identity_attack_score = response['attributeScores']['IDENTITY_ATTACK']['summaryScore']['value']
insult_score = response['attributeScores']['INSULT']['summaryScore']['value']
threat_score = response['attributeScores']['THREAT']['summaryScore']['value']
sexually_explicit_score = response['attributeScores']['SEXUALLY_EXPLICIT']['summaryScore']['value']
return {
"toxicity_score": toxicity_score,
"severe_toxicity_score": severe_toxicity_score,
"identity_attack_score": identity_attack_score,
"insult_score": insult_score,
"threat_score": threat_score,
"sexually_explicit_score": sexually_explicit_score,
"error": ""
}
except Exception as e:
with open(logfile, 'a') as f:
f.write(f"{time.ctime()}: EXCEPTION. Tweet Id: {tweet_id}: {e}")
f.write('\n')
print(f"EXCEPTION. Tweet Id: {tweet_id}: {e}")
if ('reason' in e.error_details[0] and e.error_details[0]['reason'] == 'RATE_LIMIT_EXCEEDED'):
with open(logfile, 'a') as f:
sleeptime = 70
f.write(f"{time.ctime()}: Sleeping for {sleeptime} seconds")
f.write('\n')
print(f"Sleeping for {sleeptime} seconds")
time.sleep(70)
return query_perspective(client, text, tweet_id, logfile)
return {
"toxicity_score": -1,
"severe_toxicity_score": -1,
"identity_attack_score": -1,
"insult_score": -1,
"threat_score": -1,
"sexually_explicit_score": -1,
"error": "ERROR"
}
def process_tweet(tweet, perspective_client, output_dataframe, logfile):
data = query_perspective(perspective_client, tweet['tweet_text'], tweet['tweet_id'], logfile)
output_dataframe.loc[tweet['tweet_id']] = [
tweet['tweet_id'],
data['toxicity_score'],
data['severe_toxicity_score'],
data['identity_attack_score'],
data['insult_score'],
data['threat_score'],
data['sexually_explicit_score'],
data['error']
]
def process_day(directory):
    """Score every tweet in ``directory``/tweets.csv with the Perspective API.

    Tweets are read in chunks of 50 rows; their toxicity scores are appended
    to ``directory``/perspective_processed_tweets.csv (recreated from scratch
    on every run).  Progress and API errors are logged to text files inside
    the same directory.

    NOTE(review): relies on the module-level ``perspective_client`` created in
    the ``__main__`` block rather than taking the client as a parameter.
    """
    logfile = directory + "/perspective_error_log.txt"
    progress_logfile = directory + "/perspective_progress_log.txt"
    with open(progress_logfile, 'a') as f:
        f.write(f"{time.ctime()}: Starting processing for {directory}")
        f.write('\n')
    print(f"Starting processing for {directory}")
    in_csv = directory + "/tweets.csv"
    out_csv = directory + "/perspective_processed_tweets.csv"
    # Start from scratch: discard any partial output from a previous run.
    if os.path.exists(out_csv):
        os.remove(out_csv)
    # Count rows up front so progress can be reported.
    # BUGFIX: the original left this file handle open (sum(... open(in_csv))).
    with open(in_csv) as f:
        number_lines = sum(1 for row in f)
    chunk_size = 50
    tweets_remaining = number_lines - 1  # minus the header row
    with open(progress_logfile, 'a') as f:
        f.write(f"{time.ctime()}: Number of tweets: {tweets_remaining}")
        f.write('\n')
    print(f"Number of tweets: {tweets_remaining}")
    for i in range(0, number_lines, chunk_size):
        start = time.time()
        # Re-open the CSV each pass, skipping the rows already processed.
        in_tweets = pd.read_csv(in_csv,
                                header=0,
                                nrows=chunk_size,
                                skiprows=range(1, i))
        if (i == 0):
            print(f"Loaded first {len(in_tweets.index)} tweets.")
        out_tweets = pd.DataFrame(
            columns=["tweet_id", "toxicity_score", "severe_toxicity_score", "identity_attack_score", "insult_score",
                     "threat_score", "sexually_explicit_score", "error"])
        for _, row in in_tweets.iterrows():
            process_tweet(row, perspective_client, out_tweets, logfile)
            time.sleep(1)  # stay under the Perspective API rate limit
        new_dtypes = {
            "tweet_id": int,
            "toxicity_score": np.float64,
            "severe_toxicity_score": np.float64,
            "identity_attack_score": np.float64,
            "insult_score": np.float64,
            "threat_score": np.float64,
            "sexually_explicit_score": np.float64,
            "error": str
        }
        out_tweets = out_tweets.astype(new_dtypes)
        # Write the header only with the first chunk; afterwards just append.
        out_tweets.to_csv(out_csv,
                          index=False,
                          header=(i == 0),
                          mode='a',
                          chunksize=chunk_size)
        tweets_remaining = tweets_remaining - len(out_tweets.index)
        msg = f"Processed {len(out_tweets.index)} tweets in {time.time() - start} seconds. {tweets_remaining} tweets remaining."
        with open(progress_logfile, 'a') as f:
            f.write(f"{time.ctime()}: {msg}")
            f.write('\n')
        print(msg)
    with open(progress_logfile, 'a') as f:
        f.write(f"{time.ctime()}: Completed processing for {directory}")
        f.write('\n')
    print(f"Completed processing for {directory}")
if __name__ == "__main__":
    # Root of the data-collection tree; each zero-padded numbered
    # sub-directory ("/01" .. "/36") holds one batch of collected tweets.
    root_path = "/Users/davebuckley/Documents/Kings/Dissertation/dissertation/data_collection"
    day_paths = [f"/{day:02d}" for day in range(1, 37)]
    print("Connecting to Perspective API")
    API_KEY = os.getenv("PERSPECTIVE_API_KEY")
    perspective_client = get_perspective_client(API_KEY)
    print("Connected to Perspective API")
    for day in day_paths:
        process_day(root_path + day)
    print("All completed")
| true | true |
1c474c7f2acba2c62fabc8f02e4bf556a023e101 | 1,066 | py | Python | jesse/indicators/pfe.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | 5 | 2021-05-21T07:39:16.000Z | 2021-11-17T11:08:41.000Z | jesse/indicators/pfe.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | null | null | null | jesse/indicators/pfe.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | 2 | 2021-05-21T10:14:53.000Z | 2021-05-27T04:39:51.000Z | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source, slice_candles, same_length
def pfe(candles: np.ndarray, period: int = 10, smoothing: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    Polarized Fractal Efficiency (PFE)

    :param candles: np.ndarray
    :param period: int - default: 10
    :param smoothing: int - default: 5
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)

    lookback = period - 1
    price_change = np.diff(source, lookback)
    # Straight-line (most "efficient") path length over the lookback window.
    direct_path = np.sqrt(np.power(price_change, 2) + np.power(period, 2))
    # Cumulative length of the actual bar-to-bar price path.
    step_lengths = np.sqrt(1 + np.power(np.diff(source, 1), 2))
    travelled_path = talib.SUM(step_lengths, lookback)
    efficiency = 100 * same_length(source, direct_path) / same_length(source, travelled_path)
    # Sign follows the direction of the net price change, then smooth with EMA.
    signed_efficiency = np.where(same_length(source, price_change) > 0, efficiency, -efficiency)
    res = talib.EMA(signed_efficiency, smoothing)

    return res if sequential else res[-1]
| 31.352941 | 130 | 0.67167 | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source, slice_candles, same_length
def pfe(candles: np.ndarray, period: int = 10, smoothing: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    Polarized Fractal Efficiency (PFE)

    :param candles: np.ndarray
    :param period: int - default: 10
    :param smoothing: int - default: 5
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    ln = period - 1
    # Net price change over the lookback window.
    diff = np.diff(source, ln)
    # a: straight-line path length; b: cumulative bar-to-bar path length.
    a = np.sqrt(np.power(diff, 2) + np.power(period, 2))
    b = talib.SUM(np.sqrt(1 + np.power(np.diff(source, 1), 2)), ln)
    # Efficiency ratio in percent, padded back to the source length.
    pfetmp = 100 * same_length(source, a) / same_length(source, b)
    # Sign follows the direction of the net change; smooth with an EMA.
    res = talib.EMA(np.where(same_length(source, diff) > 0, pfetmp, -pfetmp), smoothing)
    return res if sequential else res[-1]
| true | true |
1c474d6b5e003a2cec79900ccf7c78c070a40e62 | 24,545 | py | Python | lib/model_eval/model_eval_ncnet_adap.py | JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching | b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9 | [
"MIT"
] | 1 | 2021-07-22T05:18:10.000Z | 2021-07-22T05:18:10.000Z | lib/model_eval/model_eval_ncnet_adap.py | JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching | b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9 | [
"MIT"
] | null | null | null | lib/model_eval/model_eval_ncnet_adap.py | JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching | b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
import numpy as np
import numpy.matlib
import pickle
from lib.torch_util import Softmax1D
from lib.conv4d import Conv4d
from lib.matching_model import CMDTop
from lib.matching_model import unNormMap1D_to_NormMap2D, NormMap2D_to_unNormMap2D
from lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig
import torch.nn.functional as F
def featureL2Norm(feature):
    """L2-normalize ``feature`` along the channel dimension (dim 1).

    A small epsilon keeps the division stable when a channel vector is all
    zeros.  The input shape is preserved.
    """
    eps = 1e-6
    squared_sum = torch.sum(torch.pow(feature, 2), 1) + eps
    channel_norm = torch.pow(squared_sum, 0.5).unsqueeze(1).expand_as(feature)
    return feature / channel_norm
class FeatureExtraction(torch.nn.Module):
    """CNN backbone truncated at an intermediate layer for dense features.

    Supports VGG-16, ResNet-101, a ResNet-101 FPN variant and DenseNet-201
    (all pretrained).  With ``train_fe=False`` the backbone parameters are
    frozen.  ``normalization`` is stored but only forwarded to the FPN body.
    """
    def __init__(self, train_fe=False, feature_extraction_cnn='resnet101', feature_extraction_model_file='',
                 normalization=False, last_layer='', use_cuda=True):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization
        self.feature_extraction_cnn = feature_extraction_cnn
        if feature_extraction_cnn == 'vgg':
            self.model = models.vgg16(pretrained=True)
            # keep feature extraction network up to indicated layer
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                                  'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                                  'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                                  'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
            if last_layer == '':
                last_layer = 'pool4'
            last_layer_idx = vgg_feature_layers.index(last_layer)
            self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx + 1])
        # for resnet below
        resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
        if feature_extraction_cnn == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            if last_layer == '':
                last_layer = 'layer3'
            resnet_module_list = [getattr(self.model, l) for l in resnet_feature_layers]
            last_layer_idx = resnet_feature_layers.index(last_layer)
            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx + 1])
        if feature_extraction_cnn == 'resnet101fpn':
            if feature_extraction_model_file != '':
                resnet = models.resnet101(pretrained=True)
                # swap stride (2,2) and (1,1) in first layers (PyTorch ResNet is slightly different to caffe2 ResNet)
                # this is required for compatibility with caffe2 models
                resnet.layer2[0].conv1.stride = (2, 2)
                resnet.layer2[0].conv2.stride = (1, 1)
                resnet.layer3[0].conv1.stride = (2, 2)
                resnet.layer3[0].conv2.stride = (1, 1)
                resnet.layer4[0].conv1.stride = (2, 2)
                resnet.layer4[0].conv2.stride = (1, 1)
            else:
                resnet = models.resnet101(pretrained=True)
            resnet_module_list = [getattr(resnet, l) for l in resnet_feature_layers]
            conv_body = nn.Sequential(*resnet_module_list)
            # NOTE(review): fpn_body is not defined/imported in this file's
            # visible scope -- the 'resnet101fpn' path would raise NameError
            # unless fpn_body is provided elsewhere; confirm.
            self.model = fpn_body(conv_body,
                                  resnet_feature_layers,
                                  fpn_layers=['layer1', 'layer2', 'layer3'],
                                  normalize=normalization,
                                  hypercols=True)
            if feature_extraction_model_file != '':
                self.model.load_pretrained_weights(feature_extraction_model_file)
        if feature_extraction_cnn == 'densenet201':
            self.model = models.densenet201(pretrained=True)
            # keep feature extraction network up to denseblock3
            # self.model = nn.Sequential(*list(self.model.features.children())[:-3])
            # keep feature extraction network up to transitionlayer2
            self.model = nn.Sequential(*list(self.model.features.children())[:-4])
        if train_fe == False:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False
        # move to GPU
        if use_cuda:
            self.model = self.model.cuda()

    def forward(self, image_batch):
        """Run the truncated backbone on a batch of images."""
        features = self.model(image_batch)
        return features
class adap_layer_feat3(nn.Module):
def __init__(self):
super(adap_layer_feat3, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(1024),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(1024),
nn.ReLU()
)
GPU_NUM = torch.cuda.current_device()
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
print("find_correspondence_gpu:",device)
use_cuda = torch.cuda.is_available()
if use_cuda:
self.conv1.cuda()
self.conv2.cuda()
def forward(self, feature):
feature = feature + self.conv1(feature)
feature = feature + self.conv2(feature)
return feature
class FeatureCorrelation(torch.nn.Module):
    """Dense correlation between two feature maps.

    '3D' mode returns a (B, hA*wA, hB, wB) volume; '4D' mode returns
    (B, 1, hA, wA, hB, wB).  With ``normalization`` enabled the scores are
    passed through ReLU and channel-wise L2-normalized.
    """

    def __init__(self, shape='3D', normalization=True):
        super(FeatureCorrelation, self).__init__()
        self.normalization = normalization
        self.shape = shape
        self.ReLU = nn.ReLU()

    def forward(self, feature_A, feature_B):
        if self.shape == '3D':
            b, c, h, w = feature_A.size()
            # Flatten both maps so the correlation is one batched matmul.
            flat_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
            flat_B = feature_B.view(b, c, h * w).transpose(1, 2)
            scores = torch.bmm(flat_B, flat_A)
            # indexed [batch, idx_A = row_A + h*col_A, row_B, col_B]
            correlation_tensor = scores.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
        elif self.shape == '4D':
            b, c, hA, wA = feature_A.size()
            b, c, hB, wB = feature_B.size()
            flat_A = feature_A.view(b, c, hA * wA).transpose(1, 2)
            flat_B = feature_B.view(b, c, hB * wB)
            scores = torch.bmm(flat_A, flat_B)
            # indexed [batch, row_A, col_A, row_B, col_B] plus a channel dim
            correlation_tensor = scores.view(b, hA, wA, hB, wB).unsqueeze(1)
        if self.normalization:
            correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))
        return correlation_tensor
class NeighConsensus(torch.nn.Module):
    """Stack of 4D convolutions (project-local ``Conv4d``) with ReLUs that
    filters a 4D correlation volume via neighbourhood consensus.

    ``kernel_sizes`` and ``channels`` must have the same length; the first
    layer always takes a single input channel.
    """
    def __init__(self, use_cuda=True, kernel_sizes=[3, 3, 3], channels=[10, 10, 1], symmetric_mode=False):
        super(NeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        for i in range(num_layers):
            if i == 0:
                ch_in = 1
            else:
                ch_in = channels[i - 1]
            ch_out = channels[i]
            k_size = kernel_sizes[i]
            nn_modules.append(Conv4d(in_channels=ch_in, out_channels=ch_out, kernel_size=k_size, bias=True))
            nn_modules.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*nn_modules)
        if use_cuda:
            self.conv.cuda()

    def forward(self, x):
        if self.symmetric_mode:
            # apply network on the input and its "transpose" (swapping A-B to B-A ordering of the correlation tensor),
            # this second result is "transposed back" to the A-B ordering to match the first result and be able to add together
            x = self.conv(x) + self.conv(x.permute(0, 1, 4, 5, 2, 3)).permute(0, 1, 4, 5, 2, 3)
            # because of the ReLU layers in between linear layers,
            # this operation is different than convolving a single time with the filters+filters^T
            # and therefore it makes sense to do this.
        else:
            x = self.conv(x)
        return x
def MutualMatching(corr4d):
    """Suppress non-mutual matches in a 4D correlation volume.

    Every score is rescaled by the product of its ratio to the best score in
    both matching directions (A->B and B->A), so only correspondences that
    are near-maximal both ways keep a high value.  The input shape
    (B, 1, hA, wA, hB, wB) is preserved.
    """
    batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
    eps = 1e-5
    # Scores viewed from B's side: for every target position, all source scores.
    scores_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)
    # Scores viewed from A's side: for every source position, all target scores.
    scores_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4)
    best_B, _ = torch.max(scores_B, dim=1, keepdim=True)
    best_A, _ = torch.max(scores_A, dim=3, keepdim=True)
    ratio_B = (scores_B / (best_B + eps)).view(batch_size, 1, fs1, fs2, fs3, fs4)
    ratio_A = (scores_A / (best_A + eps)).view(batch_size, 1, fs1, fs2, fs3, fs4)
    # parenthesis are important for symmetric output
    return corr4d * (ratio_A * ratio_B)
def maxpool4d(corr4d_hres, k_size=4):
    """4D max-pooling over a high-resolution correlation tensor.

    Pools ``corr4d_hres`` (B, 1, H, W, H, W) with stride/kernel ``k_size`` in
    all four spatial dimensions, and also returns, for every pooled cell, the
    relative (i, j, k, l) coordinates of the argmax inside that cell (each in
    [0, k_size)), which are needed for relocalization.
    """
    slices = []
    # Enumerate all k_size^4 offset combinations; each strided slice picks
    # the cell element at one fixed relative offset.
    for i in range(k_size):
        for j in range(k_size):
            for k in range(k_size):
                for l in range(k_size):
                    slices.append(corr4d_hres[:, 0, i::k_size, j::k_size, k::k_size, l::k_size].unsqueeze(0))
    slices = torch.cat(tuple(slices), dim=1)
    corr4d, max_idx = torch.max(slices, dim=1, keepdim=True)
    # Decode the flattened offset index idx = ((i*k + j)*k + k_)*k + l.
    # BUGFIX: the original used Tensor.div() on the integer index tensor,
    # which performs *true* division (float result) on modern PyTorch;
    # use floor division so the decoded offsets stay integral.
    max_l = max_idx % k_size
    max_k = (max_idx // k_size) % k_size
    max_j = (max_idx // (k_size * k_size)) % k_size
    max_i = max_idx // (k_size * k_size * k_size)
    # i,j,k,l represent the *relative* coords of the max point in the box of size k_size*k_size*k_size*k_size
    return (corr4d, max_i, max_j, max_k, max_l)
class find_correspondence(nn.Module):
    """Turn a (B, h*w, h, w) correlation volume into dense correspondences.

    For every target location a (kernel-)soft-argmax estimate of the matching
    source location is produced, returned both as an absolute sampling grid
    and as a flow (offset from the regular grid), in [-1, 1] normalized
    coordinates.

    NOTE(review): the constructor calls torch.cuda.current_device()
    unconditionally, so this module assumes a CUDA-enabled install -- confirm.
    """
    def __init__(self, feature_H, feature_W, beta, kernel_sigma):
        super(find_correspondence, self).__init__()
        GPU_NUM = torch.cuda.current_device()
        device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
        print("find_correspondence_gpu:",device)
        # beta: softmax temperature; kernel_sigma: Gaussian window for the
        # kernel-soft-argmax (apply_gaussian_kernel is currently not called
        # in kernel_soft_argmax -- the line is commented out).
        self.beta = beta
        self.kernel_sigma = kernel_sigma
        # regular grid / [-1,1] normalized
        self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, feature_W),
                                               np.linspace(-1, 1, feature_H))  # grid_X & grid_Y : feature_H x feature_W
        self.grid_X = torch.tensor(self.grid_X, dtype=torch.float, requires_grad=False).to(device)
        self.grid_Y = torch.tensor(self.grid_Y, dtype=torch.float, requires_grad=False).to(device)

        # kernels for computing gradients (central differences along x and y)
        self.dx_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 1, 3).expand(1, 2,
                                                                                                                  1,
                                                                                                                  3).to(
            device)
        self.dy_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 3, 1).expand(1, 2,
                                                                                                                  3,
                                                                                                                  1).to(
            device)

        # 1-d indices for generating Gaussian kernels
        self.x = np.linspace(0, feature_W - 1, feature_W)
        self.x = torch.tensor(self.x, dtype=torch.float, requires_grad=False).to(device)
        self.y = np.linspace(0, feature_H - 1, feature_H)
        self.y = torch.tensor(self.y, dtype=torch.float, requires_grad=False).to(device)

        # 1-d indices for kernel-soft-argmax / [-1,1] normalized
        self.x_normal = np.linspace(-1, 1, feature_W)
        self.x_normal = torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False).to(device)
        self.y_normal = np.linspace(-1, 1, feature_H)
        self.y_normal = torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False).to(device)

    def apply_gaussian_kernel(self, corr, sigma=5):
        """Mask the correlation with a Gaussian centred on its per-pixel argmax."""
        b, hw, h, w = corr.size()

        idx = corr.max(dim=1)[1]  # b x h x w    get maximum value along channel
        idx_y = (idx // w).view(b, 1, 1, h, w).float()
        idx_x = (idx % w).view(b, 1, 1, h, w).float()

        x = self.x.view(1, 1, w, 1, 1).expand(b, 1, w, h, w)
        y = self.y.view(1, h, 1, 1, 1).expand(b, h, 1, h, w)

        gauss_kernel = torch.exp(-((x - idx_x) ** 2 + (y - idx_y) ** 2) / (2 * sigma ** 2))
        gauss_kernel = gauss_kernel.view(b, hw, h, w)

        return gauss_kernel * corr

    def softmax_with_temperature(self, x, beta, d=1):
        """Numerically stable softmax along dim ``d`` with temperature ``beta``."""
        M, _ = x.max(dim=d, keepdim=True)
        x = x - M  # subtract maximum value for stability
        exp_x = torch.exp(beta * x)
        exp_x_sum = exp_x.sum(dim=d, keepdim=True)
        return exp_x / exp_x_sum

    def kernel_soft_argmax(self, corr):
        """Soft-argmax: expected (x, y) source coordinate per target location."""
        b, _, h, w = corr.size()
        # corr = self.apply_gaussian_kernel(corr, sigma=self.kernel_sigma)
        corr = self.softmax_with_temperature(corr, beta=self.beta, d=1)
        corr = corr.view(-1, h, w, h, w)  # (target hxw) x (source hxw)

        grid_x = corr.sum(dim=1, keepdim=False)  # marginalize to x-coord.
        x_normal = self.x_normal.expand(b, w)
        x_normal = x_normal.view(b, w, 1, 1)
        grid_x = (grid_x * x_normal).sum(dim=1, keepdim=True)  # b x 1 x h x w

        grid_y = corr.sum(dim=2, keepdim=False)  # marginalize to y-coord.
        y_normal = self.y_normal.expand(b, h)
        y_normal = y_normal.view(b, h, 1, 1)
        grid_y = (grid_y * y_normal).sum(dim=1, keepdim=True)  # b x 1 x h x w
        return grid_x, grid_y

    def get_flow_smoothness(self, flow, GT_mask):
        """Absolute x/y flow gradients, masked to foreground regions."""
        flow_dx = F.conv2d(F.pad(flow, (1, 1, 0, 0)), self.dx_kernel) / 2  # (padLeft, padRight, padTop, padBottom)
        flow_dy = F.conv2d(F.pad(flow, (0, 0, 1, 1)), self.dy_kernel) / 2  # (padLeft, padRight, padTop, padBottom)

        flow_dx = torch.abs(flow_dx) * GT_mask  # consider foreground regions only
        flow_dy = torch.abs(flow_dy) * GT_mask

        smoothness = torch.cat((flow_dx, flow_dy), 1)
        return smoothness

    def forward(self, corr, GT_mask=None):
        """Return (grid, flow) at test time, or (grid, flow, smoothness) when
        a ground-truth mask is supplied (training)."""
        b, _, h, w = corr.size()
        grid_X = self.grid_X.expand(b, h, w)  # x coordinates of a regular grid
        grid_X = grid_X.unsqueeze(1)  # b x 1 x h x w
        grid_Y = self.grid_Y.expand(b, h, w)  # y coordinates of a regular grid
        grid_Y = grid_Y.unsqueeze(1)

        if self.beta is not None:
            grid_x, grid_y = self.kernel_soft_argmax(corr)
        else:  # discrete argmax
            _, idx = torch.max(corr, dim=1)
            grid_x = idx % w
            grid_x = (grid_x.float() / (w - 1) - 0.5) * 2
            grid_y = idx // w
            grid_y = (grid_y.float() / (h - 1) - 0.5) * 2
            grid_x = grid_x.unsqueeze(1)  # b x 1 x h x w
            grid_y = grid_y.unsqueeze(1)

        grid = torch.cat((grid_x.permute(0, 2, 3, 1), grid_y.permute(0, 2, 3, 1)),
                         3)
        # 2-channels@3rd-dim, first channel for x / second channel for y
        flow = torch.cat((grid_x - grid_X, grid_y - grid_Y),
                         1)  # 2-channels@1st-dim, first channel for x / second channel for y

        if GT_mask is None:  # test
            # NOTE(review): flow is already (b, 2, h, w); permuting it with
            # (0, 3, 1, 2) yields (b, w, 2, h) -- looks suspicious, confirm
            # against callers before changing.
            return grid.permute(0, 3, 1, 2), flow.permute(0, 3, 1, 2)
        else:  # train
            smoothness = self.get_flow_smoothness(flow, GT_mask)
            return grid, flow, smoothness
class ImMatchNet(nn.Module):
    """NCNet-style image matching network.

    Pipeline: CNN feature extraction -> residual adaptation layers ->
    channel-wise L2 normalization -> dense 4D correlation -> mutual matching
    -> neighbourhood-consensus filtering -> mutual matching.

    Optionally restores FeatureExtraction / NeighConsensus / adap_layer_feat3
    weights from a checkpoint, whose stored args override ``ncons_channels``
    and ``ncons_kernel_sizes``.
    """

    def __init__(self,
                 feature_extraction_cnn='resnet101',
                 feature_extraction_last_layer='',
                 feature_extraction_model_file=None,
                 return_correlation=False,
                 ncons_kernel_sizes=[3, 3, 3],
                 ncons_channels=[10, 10, 1],
                 normalize_features=True,
                 train_fe=False,
                 use_cuda=True,
                 relocalization_k_size=0,
                 half_precision=False,
                 checkpoint=None,
                 ):
        super(ImMatchNet, self).__init__()
        # Load checkpoint.
        # BUGFIX: the original tested ``checkpoint is not ''`` -- an identity
        # (not equality) comparison with a str literal, which raises a
        # SyntaxWarning on modern Python and is not guaranteed to work.
        if checkpoint is not None and checkpoint != '':
            print('Loading checkpoint...')
            checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
            # Legacy checkpoints named the backbone 'vgg'; rename to 'model'.
            checkpoint['state_dict'] = OrderedDict(
                [(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
            # override relevant parameters
            print('Using checkpoint parameters: ')
            ncons_channels = checkpoint['args'].ncons_channels
            print(' ncons_channels: ' + str(ncons_channels))
            ncons_kernel_sizes = checkpoint['args'].ncons_kernel_sizes
            print(' ncons_kernel_sizes: ' + str(ncons_kernel_sizes))

        self.ReLU = nn.ReLU()
        self.use_cuda = use_cuda
        self.normalize_features = normalize_features
        print("self.normalize_features", self.normalize_features)
        self.return_correlation = return_correlation
        self.relocalization_k_size = relocalization_k_size
        self.half_precision = half_precision

        self.FeatureExtraction = FeatureExtraction(train_fe=train_fe,
                                                   feature_extraction_cnn=feature_extraction_cnn,
                                                   feature_extraction_model_file=feature_extraction_model_file,
                                                   last_layer=feature_extraction_last_layer,
                                                   normalization=False,
                                                   use_cuda=self.use_cuda)
        self.adap_layer_feat3 = adap_layer_feat3()
        self.FeatureCorrelation = FeatureCorrelation(shape='4D', normalization=False)
        self.NeighConsensus = NeighConsensus(use_cuda=self.use_cuda,
                                             kernel_sizes=ncons_kernel_sizes,
                                             channels=ncons_channels)
        # Soft-argmax correspondence head on a fixed 25x25 feature grid.
        feature_H = 25
        feature_W = 25
        beta = 50
        kernel_sigma = 5
        self.find_correspondence = find_correspondence(feature_H, feature_W, beta, kernel_sigma)

        # Copy weights from the checkpoint into the freshly built modules.
        if checkpoint is not None and checkpoint != '':
            print('Copying weights...')
            for name, param in self.FeatureExtraction.state_dict().items():
                if 'num_batches_tracked' not in name:
                    self.FeatureExtraction.state_dict()[name].copy_(
                        checkpoint['state_dict']['FeatureExtraction.' + name])
            for name, param in self.NeighConsensus.state_dict().items():
                self.NeighConsensus.state_dict()[name].copy_(checkpoint['state_dict']['NeighConsensus.' + name])
            for name, param in self.adap_layer_feat3.state_dict().items():
                self.adap_layer_feat3.state_dict()[name].copy_(checkpoint['state_dict']['adap_layer_feat3.' + name])
            print('Done!')

        self.FeatureExtraction.eval()

        if self.half_precision:
            for p in self.NeighConsensus.parameters():
                p.data = p.data.half()
            for l in self.NeighConsensus.conv:
                if isinstance(l, Conv4d):
                    l.use_half = True

    # used only for foward pass at eval and for training with strong supervision
    def forward(self, tnf_batch, writer, writer_position):
        """Compute the consensus-filtered 4D correlation for a source/target
        image pair.

        ``tnf_batch`` must contain 'source_image' and 'target_image'.
        ``writer`` / ``writer_position`` are kept for interface compatibility
        (the visualisation code that used them is disabled).  Returns the
        filtered correlation tensor, or (corr4d, delta4d) when
        ``relocalization_k_size > 1``.
        """
        # feature extraction + residual adaptation, then channel-wise L2 norm
        feature_A = self.FeatureExtraction(tnf_batch['source_image'])
        feature_B = self.FeatureExtraction(tnf_batch['target_image'])
        adap_feature_A = featureL2Norm(self.adap_layer_feat3(feature_A))
        adap_feature_B = featureL2Norm(self.adap_layer_feat3(feature_B))
        if self.half_precision:
            feature_A = feature_A.half()
            feature_B = feature_B.half()
        # feature correlation (4D volume)
        corr4d = self.FeatureCorrelation(adap_feature_A, adap_feature_B)
        # do 4d maxpooling for relocalization
        if self.relocalization_k_size > 1:
            corr4d, max_i, max_j, max_k, max_l = maxpool4d(corr4d, k_size=self.relocalization_k_size)
        batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
        # Winner-take-all map from the raw correlation (kept for debugging /
        # visualisation; its result is not part of the returned value).
        nc_B_Avec_WTA = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)  # [batch_idx,k_A,i_B,j_B]
        scores_WTA_B, index_WTA_B = torch.max(nc_B_Avec_WTA, dim=1)
        index1D_WTA_B = index_WTA_B.view(batch_size, -1)
        Map2D_WTA = unNormMap1D_to_NormMap2D(index1D_WTA_B)  # (B,2,S,S)
        # mutual matching -> 4D consensus network -> mutual matching again
        corr4d = MutualMatching(corr4d)
        corr4d_Net = self.NeighConsensus(corr4d.detach())
        corr4d_Net = MutualMatching(corr4d_Net)
        nc_B_Avec_NET = corr4d_Net.view(batch_size, fs1 * fs2, fs3, fs4)  # [batch_idx,k_A,i_B,j_B]
        Map2D_NET, Flow2D_NET = self.find_correspondence(nc_B_Avec_NET)
        unNormMap2D_NET = NormMap2D_to_unNormMap2D(Map2D_NET)  # (B,2,S,S)
        if self.relocalization_k_size > 1:
            delta4d = (max_i, max_j, max_k, max_l)
            return (corr4d, delta4d)
        else:
            return corr4d_Net
| 48.992016 | 154 | 0.5989 | from __future__ import print_function, division
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
import numpy as np
import numpy.matlib
import pickle
from lib.torch_util import Softmax1D
from lib.conv4d import Conv4d
from lib.matching_model import CMDTop
from lib.matching_model import unNormMap1D_to_NormMap2D, NormMap2D_to_unNormMap2D
from lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig
import torch.nn.functional as F
def featureL2Norm(feature):
    """Divide ``feature`` by its per-position L2 norm over the channel dim.

    An epsilon inside the square root keeps all-zero channels finite; the
    output has the same shape as the input.
    """
    stabilizer = 1e-6
    sq = torch.pow(feature, 2)
    l2 = torch.pow(torch.sum(sq, 1) + stabilizer, 0.5)
    return torch.div(feature, l2.unsqueeze(1).expand_as(feature))
class FeatureExtraction(torch.nn.Module):
    """Pretrained CNN backbone truncated at an intermediate layer.

    Supports VGG-16, ResNet-101, a ResNet-101 FPN variant and DenseNet-201.
    With ``train_fe=False`` the backbone parameters are frozen.
    """
    def __init__(self, train_fe=False, feature_extraction_cnn='resnet101', feature_extraction_model_file='',
                 normalization=False, last_layer='', use_cuda=True):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization
        self.feature_extraction_cnn = feature_extraction_cnn
        if feature_extraction_cnn == 'vgg':
            self.model = models.vgg16(pretrained=True)
            # keep the feature-extraction network up to the indicated layer
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                                  'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                                  'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                                  'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
            if last_layer == '':
                last_layer = 'pool4'
            last_layer_idx = vgg_feature_layers.index(last_layer)
            self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx + 1])
        resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
        if feature_extraction_cnn == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            if last_layer == '':
                last_layer = 'layer3'
            resnet_module_list = [getattr(self.model, l) for l in resnet_feature_layers]
            last_layer_idx = resnet_feature_layers.index(last_layer)
            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx + 1])
        if feature_extraction_cnn == 'resnet101fpn':
            if feature_extraction_model_file != '':
                resnet = models.resnet101(pretrained=True)
                # stride swaps for compatibility with caffe2-trained weights
                resnet.layer2[0].conv1.stride = (2, 2)
                resnet.layer2[0].conv2.stride = (1, 1)
                resnet.layer3[0].conv1.stride = (2, 2)
                resnet.layer3[0].conv2.stride = (1, 1)
                resnet.layer4[0].conv1.stride = (2, 2)
                resnet.layer4[0].conv2.stride = (1, 1)
            else:
                resnet = models.resnet101(pretrained=True)
            resnet_module_list = [getattr(resnet, l) for l in resnet_feature_layers]
            conv_body = nn.Sequential(*resnet_module_list)
            # NOTE(review): fpn_body is not defined/imported in this file's
            # visible scope; this branch would raise NameError -- confirm.
            self.model = fpn_body(conv_body,
                                  resnet_feature_layers,
                                  fpn_layers=['layer1', 'layer2', 'layer3'],
                                  normalize=normalization,
                                  hypercols=True)
            if feature_extraction_model_file != '':
                self.model.load_pretrained_weights(feature_extraction_model_file)
        if feature_extraction_cnn == 'densenet201':
            self.model = models.densenet201(pretrained=True)
            # keep the network up to (and excluding) the last four children
            self.model = nn.Sequential(*list(self.model.features.children())[:-4])
        if train_fe == False:
            # freeze backbone parameters
            for param in self.model.parameters():
                param.requires_grad = False
        # move to GPU
        if use_cuda:
            self.model = self.model.cuda()

    def forward(self, image_batch):
        """Run the truncated backbone on a batch of images."""
        features = self.model(image_batch)
        return features
class adap_layer_feat3(nn.Module):
def __init__(self):
super(adap_layer_feat3, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(1024),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(1024),
nn.ReLU()
)
GPU_NUM = torch.cuda.current_device()
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
print("find_correspondence_gpu:",device)
use_cuda = torch.cuda.is_available()
if use_cuda:
self.conv1.cuda()
self.conv2.cuda()
def forward(self, feature):
feature = feature + self.conv1(feature)
feature = feature + self.conv2(feature)
return feature
class FeatureCorrelation(torch.nn.Module):
    """All-pairs correlation of two feature maps via a batched matmul.

    '3D' mode yields (B, hA*wA, hB, wB); '4D' mode yields
    (B, 1, hA, wA, hB, wB).  Optional ReLU + L2 normalization of the scores.
    """

    def __init__(self, shape='3D', normalization=True):
        super(FeatureCorrelation, self).__init__()
        self.normalization = normalization
        self.shape = shape
        self.ReLU = nn.ReLU()

    def forward(self, feature_A, feature_B):
        if self.shape == '3D':
            b, c, h, w = feature_A.size()
            flat_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
            flat_B = feature_B.view(b, c, h * w).transpose(1, 2)
            scores = torch.bmm(flat_B, flat_A)
            correlation_tensor = scores.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
        elif self.shape == '4D':
            b, c, hA, wA = feature_A.size()
            b, c, hB, wB = feature_B.size()
            flat_A = feature_A.view(b, c, hA * wA).transpose(1, 2)
            flat_B = feature_B.view(b, c, hB * wB)
            scores = torch.bmm(flat_A, flat_B)
            correlation_tensor = scores.view(b, hA, wA, hB, wB).unsqueeze(1)
        if self.normalization:
            correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))
        return correlation_tensor
class NeighConsensus(torch.nn.Module):
    """Stack of 4D convolutions (project-local ``Conv4d``) with ReLUs.

    Filters a 4D correlation volume; the first layer always has one input
    channel.  ``kernel_sizes`` and ``channels`` must have equal length.
    """
    def __init__(self, use_cuda=True, kernel_sizes=[3, 3, 3], channels=[10, 10, 1], symmetric_mode=False):
        super(NeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        for i in range(num_layers):
            if i == 0:
                ch_in = 1
            else:
                ch_in = channels[i - 1]
            ch_out = channels[i]
            k_size = kernel_sizes[i]
            nn_modules.append(Conv4d(in_channels=ch_in, out_channels=ch_out, kernel_size=k_size, bias=True))
            nn_modules.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*nn_modules)
        if use_cuda:
            self.conv.cuda()

    def forward(self, x):
        if self.symmetric_mode:
            # run the net on the input and on its A<->B transpose, transpose
            # the second result back, and add, for a symmetric output
            x = self.conv(x) + self.conv(x.permute(0, 1, 4, 5, 2, 3)).permute(0, 1, 4, 5, 2, 3)
        else:
            x = self.conv(x)
        return x
def MutualMatching(corr4d):
    """Re-weight a (B, 1, hA, wA, hB, wB) correlation so that only mutually
    near-maximal matches (in both the A->B and B->A directions) keep a high
    score.  The shape is preserved.
    """
    batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
    eps = 1e-5
    view_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)
    view_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4)
    peak_B = torch.max(view_B, dim=1, keepdim=True)[0]
    peak_A = torch.max(view_A, dim=3, keepdim=True)[0]
    weight_B = (view_B / (peak_B + eps)).view(batch_size, 1, fs1, fs2, fs3, fs4)
    weight_A = (view_A / (peak_A + eps)).view(batch_size, 1, fs1, fs2, fs3, fs4)
    # parenthesis are important for symmetric output
    return corr4d * (weight_A * weight_B)
def maxpool4d(corr4d_hres, k_size=4):
    """4D max-pooling of a high-resolution correlation tensor.

    Pools `corr4d_hres` (shape (b, 1, H1, W1, H2, W2)) with a window of
    `k_size` along each of the four spatial dimensions and also returns the
    per-cell argmax offsets so the high-resolution position can be
    reconstructed later ("relocalization").

    Returns
    -------
    (corr4d, max_i, max_j, max_k, max_l)
        The pooled tensor and the four offset tensors (each in
        [0, k_size)) locating the maximum inside its pooling cell.
    """
    slices = []
    # Enumerate all k_size**4 window offsets; each slice samples every
    # k_size-th element along the four spatial dims, so stacking them on a
    # new channel dim and taking the max is equivalent to 4D max-pooling.
    for i in range(k_size):
        for j in range(k_size):
            for k in range(k_size):
                for l in range(k_size):
                    slices.append(corr4d_hres[:, 0, i::k_size, j::k_size, k::k_size, l::k_size].unsqueeze(0))
    slices = torch.cat(tuple(slices), dim=1)
    corr4d, max_idx = torch.max(slices, dim=1, keepdim=True)
    # Decode the flat offset index back into its (i, j, k, l) digits in
    # base k_size (l is the least-significant digit).
    # NOTE(review): Tensor.div on integer tensors performs true division in
    # torch >= 1.5 (it used to floor) -- confirm the intended torch version,
    # otherwise these decoded indices may come out as floats.
    max_l = torch.fmod(max_idx, k_size)
    max_k = torch.fmod(max_idx.sub(max_l).div(k_size), k_size)
    max_j = torch.fmod(max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size), k_size)
    max_i = max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size).sub(max_j).div(k_size)
    return (corr4d, max_i, max_j, max_k, max_l)
class find_correspondence(nn.Module):
    """Convert a dense correlation map into per-pixel correspondences.

    Given a correlation volume of shape (b, h*w, h, w) between a source and
    a target feature map, produces a normalized sampling grid (in [-1, 1],
    matching ``F.grid_sample`` conventions) and the corresponding flow
    field.  When ``beta`` is set, a differentiable soft argmax with
    temperature ``beta`` is used; otherwise a hard argmax is taken.
    """

    def __init__(self, feature_H, feature_W, beta, kernel_sigma):
        super(find_correspondence, self).__init__()
        # Bug fix: torch.cuda.current_device() raises on CPU-only builds /
        # machines without a GPU, so only query it when CUDA is available.
        if torch.cuda.is_available():
            device = torch.device(f'cuda:{torch.cuda.current_device()}')
        else:
            device = torch.device('cpu')
        print("find_correspondence_gpu:", device)
        self.beta = beta
        self.kernel_sigma = kernel_sigma
        # Normalized [-1, 1] coordinate grids of the feature map.
        self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, feature_W),
                                               np.linspace(-1, 1, feature_H))
        self.grid_X = torch.tensor(self.grid_X, dtype=torch.float, requires_grad=False).to(device)
        self.grid_Y = torch.tensor(self.grid_Y, dtype=torch.float, requires_grad=False).to(device)
        # Central-difference kernels (applied across both flow channels) used
        # by the smoothness term.
        self.dx_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(
            1, 1, 1, 3).expand(1, 2, 1, 3).to(device)
        self.dy_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(
            1, 1, 3, 1).expand(1, 2, 3, 1).to(device)
        # Unnormalized pixel coordinates ...
        self.x = np.linspace(0, feature_W - 1, feature_W)
        self.x = torch.tensor(self.x, dtype=torch.float, requires_grad=False).to(device)
        self.y = np.linspace(0, feature_H - 1, feature_H)
        self.y = torch.tensor(self.y, dtype=torch.float, requires_grad=False).to(device)
        # ... and their [-1, 1] normalized counterparts.
        self.x_normal = np.linspace(-1, 1, feature_W)
        self.x_normal = torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False).to(device)
        self.y_normal = np.linspace(-1, 1, feature_H)
        self.y_normal = torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False).to(device)

    def apply_gaussian_kernel(self, corr, sigma=5):
        """Modulate `corr` (b, h*w, h, w) by a Gaussian centred on its argmax."""
        b, hw, h, w = corr.size()
        idx = corr.max(dim=1)[1]  # flat argmax over source positions
        idx_y = (idx // w).view(b, 1, 1, h, w).float()
        idx_x = (idx % w).view(b, 1, 1, h, w).float()
        x = self.x.view(1, 1, w, 1, 1).expand(b, 1, w, h, w)
        y = self.y.view(1, h, 1, 1, 1).expand(b, h, 1, h, w)
        gauss_kernel = torch.exp(-((x - idx_x) ** 2 + (y - idx_y) ** 2) / (2 * sigma ** 2))
        gauss_kernel = gauss_kernel.view(b, hw, h, w)
        return gauss_kernel * corr

    def softmax_with_temperature(self, x, beta, d=1):
        """Numerically stable softmax of `x` along dim `d` with temperature `beta`."""
        M, _ = x.max(dim=d, keepdim=True)
        x = x - M  # subtract the max for numerical stability
        exp_x = torch.exp(beta * x)
        exp_x_sum = exp_x.sum(dim=d, keepdim=True)
        return exp_x / exp_x_sum

    def kernel_soft_argmax(self, corr):
        """Differentiable argmax: expected (x, y) under the softmaxed correlation."""
        b, _, h, w = corr.size()
        corr = self.softmax_with_temperature(corr, beta=self.beta, d=1)
        corr = corr.view(-1, h, w, h, w)  # (b, source_h, source_w, target_h, target_w)
        # Marginalize over source rows, then take the expectation of the
        # normalized x coordinate (and analogously for y).
        grid_x = corr.sum(dim=1, keepdim=False)
        x_normal = self.x_normal.expand(b, w)
        x_normal = x_normal.view(b, w, 1, 1)
        grid_x = (grid_x * x_normal).sum(dim=1, keepdim=True)
        grid_y = corr.sum(dim=2, keepdim=False)
        y_normal = self.y_normal.expand(b, h)
        y_normal = y_normal.view(b, h, 1, 1)
        grid_y = (grid_y * y_normal).sum(dim=1, keepdim=True)
        return grid_x, grid_y

    def get_flow_smoothness(self, flow, GT_mask):
        """First-order flow smoothness (masked |dx| and |dy|), channels concatenated."""
        flow_dx = F.conv2d(F.pad(flow, (1, 1, 0, 0)), self.dx_kernel) / 2
        flow_dy = F.conv2d(F.pad(flow, (0, 0, 1, 1)), self.dy_kernel) / 2
        flow_dx = torch.abs(flow_dx) * GT_mask
        flow_dy = torch.abs(flow_dy) * GT_mask
        smoothness = torch.cat((flow_dx, flow_dy), 1)
        return smoothness

    def forward(self, corr, GT_mask=None):
        """Return (grid, flow) -- plus smoothness when `GT_mask` is given."""
        b, _, h, w = corr.size()
        grid_X = self.grid_X.expand(b, h, w)
        grid_X = grid_X.unsqueeze(1)
        grid_Y = self.grid_Y.expand(b, h, w)
        grid_Y = grid_Y.unsqueeze(1)
        if self.beta is not None:
            # Differentiable soft argmax.
            grid_x, grid_y = self.kernel_soft_argmax(corr)
        else:
            # Hard (non-differentiable) argmax fallback.
            _, idx = torch.max(corr, dim=1)
            grid_x = idx % w
            grid_x = (grid_x.float() / (w - 1) - 0.5) * 2
            grid_y = idx // w
            grid_y = (grid_y.float() / (h - 1) - 0.5) * 2
            grid_x = grid_x.unsqueeze(1)
            grid_y = grid_y.unsqueeze(1)
        grid = torch.cat((grid_x.permute(0, 2, 3, 1), grid_y.permute(0, 2, 3, 1)),
                         3)
        flow = torch.cat((grid_x - grid_X, grid_y - grid_Y),
                         1)
        if GT_mask is None:
            return grid.permute(0, 3, 1, 2), flow.permute(0, 3, 1, 2)
        else:
            smoothness = self.get_flow_smoothness(flow, GT_mask)
            return grid, flow, smoothness
class ImMatchNet(nn.Module):
    """End-to-end matching network (NC-Net style): CNN feature extraction,
    4D feature correlation, mutual matching and neighbourhood-consensus
    filtering, followed by soft-argmax correspondence extraction.
    """

    def __init__(self,
                 feature_extraction_cnn='resnet101',
                 feature_extraction_last_layer='',
                 feature_extraction_model_file=None,
                 return_correlation=False,
                 ncons_kernel_sizes=[3, 3, 3],
                 ncons_channels=[10, 10, 1],
                 normalize_features=True,
                 train_fe=False,
                 use_cuda=True,
                 relocalization_k_size=0,
                 half_precision=False,
                 checkpoint=None,
                 ):
        super(ImMatchNet, self).__init__()
        # Bug fix: the original used `checkpoint is not ''`, an identity
        # comparison with a string literal (unreliable and a SyntaxWarning
        # on Python >= 3.8).  Use value inequality instead.
        if checkpoint is not None and checkpoint != '':
            print('Loading checkpoint...')
            checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
            # Legacy checkpoints name the feature extractor 'vgg'; rename.
            checkpoint['state_dict'] = OrderedDict(
                [(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
            # Override the NC architecture with what the checkpoint was trained with.
            print('Using checkpoint parameters: ')
            ncons_channels = checkpoint['args'].ncons_channels
            print('  ncons_channels: ' + str(ncons_channels))
            ncons_kernel_sizes = checkpoint['args'].ncons_kernel_sizes
            print('  ncons_kernel_sizes: ' + str(ncons_kernel_sizes))
        self.ReLU = nn.ReLU()
        self.use_cuda = use_cuda
        self.normalize_features = normalize_features
        print("self.normalize_features", self.normalize_features)
        self.return_correlation = return_correlation
        self.relocalization_k_size = relocalization_k_size
        self.half_precision = half_precision
        self.FeatureExtraction = FeatureExtraction(train_fe=train_fe,
                                                   feature_extraction_cnn=feature_extraction_cnn,
                                                   feature_extraction_model_file=feature_extraction_model_file,
                                                   last_layer=feature_extraction_last_layer,
                                                   normalization=False,
                                                   use_cuda=self.use_cuda)
        self.adap_layer_feat3 = adap_layer_feat3()
        self.FeatureCorrelation = FeatureCorrelation(shape='4D', normalization=False)
        self.NeighConsensus = NeighConsensus(use_cuda=self.use_cuda,
                                             kernel_sizes=ncons_kernel_sizes,
                                             channels=ncons_channels)
        feature_H = 25
        feature_W = 25
        beta = 50
        kernel_sigma = 5
        self.find_correspondence = find_correspondence(feature_H, feature_W, beta, kernel_sigma)
        # Load pretrained weights (if any) into the freshly built modules.
        if checkpoint is not None and checkpoint != '':
            print('Copying weights...')
            for name, param in self.FeatureExtraction.state_dict().items():
                if 'num_batches_tracked' not in name:
                    self.FeatureExtraction.state_dict()[name].copy_(
                        checkpoint['state_dict']['FeatureExtraction.' + name])
            for name, param in self.NeighConsensus.state_dict().items():
                self.NeighConsensus.state_dict()[name].copy_(checkpoint['state_dict']['NeighConsensus.' + name])
            for name, param in self.adap_layer_feat3.state_dict().items():
                self.adap_layer_feat3.state_dict()[name].copy_(checkpoint['state_dict']['adap_layer_feat3.' + name])
            print('Done!')
        # The backbone is kept frozen in eval mode.
        self.FeatureExtraction.eval()
        if self.half_precision:
            for p in self.NeighConsensus.parameters():
                p.data = p.data.half()
            for l in self.NeighConsensus.conv:
                if isinstance(l, Conv4d):
                    l.use_half = True

    def forward(self, tnf_batch, writer, writer_position):
        """Compute the mutually-matched, consensus-filtered 4D correlation
        for a batch dict with 'source_image' and 'target_image' entries."""
        feature_A = self.FeatureExtraction(tnf_batch['source_image'])
        feature_B = self.FeatureExtraction(tnf_batch['target_image'])
        # Adaptation layer + L2 normalization on both feature maps.
        adap_feature_A = self.adap_layer_feat3(feature_A)
        adap_feature_B = self.adap_layer_feat3(feature_B)
        adap_feature_A = featureL2Norm(adap_feature_A)
        adap_feature_B = featureL2Norm(adap_feature_B)
        if self.half_precision:
            feature_A = feature_A.half()
            feature_B = feature_B.half()
        corr4d = self.FeatureCorrelation(adap_feature_A, adap_feature_B)
        if self.relocalization_k_size > 1:
            corr4d, max_i, max_j, max_k, max_l = maxpool4d(corr4d, k_size=self.relocalization_k_size)
        batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
        # Winner-take-all baseline map (kept for inspection/debugging).
        nc_B_Avec_WTA = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)
        scores_WTA_B, index_WTA_B = torch.max(nc_B_Avec_WTA, dim=1)
        index1D_WTA_B = index_WTA_B.view(batch_size, -1)
        Map2D_WTA = unNormMap1D_to_NormMap2D(index1D_WTA_B)
        # Mutual matching before and after neighbourhood consensus; the NC
        # network runs on a detached copy so its gradients do not reach the
        # correlation/feature stages.
        corr4d = MutualMatching(corr4d)
        corr4d_Net = self.NeighConsensus(corr4d.detach())
        corr4d_Net = MutualMatching(corr4d_Net)
        nc_B_Avec_NET = corr4d_Net.view(batch_size, fs1 * fs2, fs3, fs4)
        Map2D_NET, Flow2D_NET = self.find_correspondence(nc_B_Avec_NET)
        unNormMap2D_NET = NormMap2D_to_unNormMap2D(Map2D_NET)
        if self.relocalization_k_size > 1:
            delta4d = (max_i, max_j, max_k, max_l)
            return (corr4d, delta4d)
        else:
            return corr4d_Net
| true | true |
1c474eb2a7180c4b80cf9601418dd0b801e92818 | 1,880 | py | Python | pyleecan/Methods/Slot/HoleM53/check.py | Kelos-Zhu/pyleecan | 368f8379688e31a6c26d2c1cd426f21dfbceff2a | [
"Apache-2.0"
] | 2 | 2019-06-08T15:04:39.000Z | 2020-09-07T13:32:22.000Z | pyleecan/Methods/Slot/HoleM53/check.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | [
"Apache-2.0"
] | null | null | null | pyleecan/Methods/Slot/HoleM53/check.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from numpy import pi
from ....Methods.Slot.Slot.check import SlotCheckError
def check(self):
    """Validate the geometry of a HoleM53 object.

    Parameters
    ----------
    self : HoleM53
        A HoleM53 object

    Returns
    -------
    None

    Raises
    -------
    S53_NoneError
        A required property (W1..W4, H0..H3) is not set
    S53_Rbo0CheckError
        You must have H0 < Rbo
    S53_Rbo1CheckError
        You must have H1 < Rbo
    S53_W4CheckError
        You must have W4 < pi/2
    S53_W5CheckError
        You must have W5 >=0
    """
    # All geometric properties must be set before any check can run.
    # (Same order and messages as the historical elif chain.)
    for prop_name in ("W1", "W2", "W3", "W4", "H0", "H1", "H2", "H3"):
        if getattr(self, prop_name) is None:
            raise S53_NoneError("You must set " + prop_name + " !")

    Rbo = self.get_Rbo()

    # The hole heights must stay strictly inside the bore radius.
    if self.H0 >= Rbo:
        raise S53_Rbo0CheckError("You must have H0 < Rbo")
    if self.H1 >= Rbo:
        raise S53_Rbo1CheckError("You must have H1 < Rbo")
    # W4 is an angle and must stay below a quarter turn.
    if self.W4 >= pi / 2:
        raise S53_W4CheckError("You must have W4 < pi/2")
    # The computed W5 width must not be negative.
    if self.comp_W5() < 0:
        raise S53_W5CheckError("You must have W5 >=0")
class S53_NoneError(SlotCheckError):
    """Raised when a required property of HoleM53 is None."""
    pass
class S53_Rbo0CheckError(SlotCheckError):
    """Raised when H0 is not strictly smaller than the bore radius Rbo."""
    pass
class S53_Rbo1CheckError(SlotCheckError):
    """Raised when H1 is not strictly smaller than the bore radius Rbo."""
    pass
class S53_W4CheckError(SlotCheckError):
    """Raised when the angle W4 is not strictly smaller than pi/2."""
    pass
class S53_W5CheckError(SlotCheckError):
    """Raised when the computed width W5 is negative."""
    pass
| 20 | 58 | 0.600532 |
from numpy import pi
from ....Methods.Slot.Slot.check import SlotCheckError
def check(self):
if self.W1 is None:
raise S53_NoneError("You must set W1 !")
elif self.W2 is None:
raise S53_NoneError("You must set W2 !")
elif self.W3 is None:
raise S53_NoneError("You must set W3 !")
elif self.W4 is None:
raise S53_NoneError("You must set W4 !")
elif self.H0 is None:
raise S53_NoneError("You must set H0 !")
elif self.H1 is None:
raise S53_NoneError("You must set H1 !")
elif self.H2 is None:
raise S53_NoneError("You must set H2 !")
elif self.H3 is None:
raise S53_NoneError("You must set H3 !")
Rbo = self.get_Rbo()
if Rbo <= self.H0:
raise S53_Rbo0CheckError("You must have H0 < Rbo")
if Rbo <= self.H1:
raise S53_Rbo1CheckError("You must have H1 < Rbo")
if pi / 2 <= self.W4:
raise S53_W4CheckError("You must have W4 < pi/2")
if self.comp_W5() < 0:
raise S53_W5CheckError("You must have W5 >=0")
class S53_NoneError(SlotCheckError):
pass
class S53_Rbo0CheckError(SlotCheckError):
pass
class S53_Rbo1CheckError(SlotCheckError):
pass
class S53_W4CheckError(SlotCheckError):
pass
class S53_W5CheckError(SlotCheckError):
pass
| true | true |
1c47503a63b297ae151dad61e17a23efab7bef67 | 664 | py | Python | bot/bot/base.py | TSPS-Team/Project | b1d83cb7957420b8348939f0a1d36f506095519c | [
"MIT"
] | null | null | null | bot/bot/base.py | TSPS-Team/Project | b1d83cb7957420b8348939f0a1d36f506095519c | [
"MIT"
] | null | null | null | bot/bot/base.py | TSPS-Team/Project | b1d83cb7957420b8348939f0a1d36f506095519c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import annotations
from server import Interface
from telegram.bot import Bot
class State:
    """Base class for a per-player bot state.

    Holds the owning player, the shared application info, and a shortcut
    to the telegram bot instance.  Subclasses override the two callback
    hooks to react to incoming updates.
    """

    player: Player
    app_info: 'AppInfo'
    bot: Bot

    def __init__(self, player, app_info) -> None:
        # Keep a direct reference to the bot for convenience.
        self.app_info = app_info
        self.bot = app_info.bot
        self.player = player

    def callback(self, update, context):
        """Hook for callback-query updates; default implementation is a no-op."""
        pass

    def text_callback(self, update, context):
        """Hook for plain-text updates; default implementation is a no-op."""
        pass
class Player:
    """A single participant, identified by a display name and a user id."""

    lobby: 'Lobby'
    state: State
    game: 'Game'

    def __init__(self, name: str, id: int) -> None:
        self.id = id
        self.name = name

    def __str__(self):
        # A player renders as its display name.
        return self.name
| 18.971429 | 51 | 0.621988 |
from __future__ import annotations
from server import Interface
from telegram.bot import Bot
class State:
player: Player
app_info: 'AppInfo'
bot: Bot
def __init__(self, player, app_info) -> None:
self.player = player
self.bot = app_info.bot
self.app_info = app_info
def callback(self, update, context):
pass
def text_callback(self, update, context):
pass
class Player:
lobby: 'Lobby'
state: State
game: 'Game'
def __init__(self, name: str, id: int) -> None:
self.name = name
self.id = id
def __str__(self):
return self.name
| true | true |
1c47504f9eb14b016fc1dc1c1fcbb3dea481e1a2 | 856 | py | Python | aiofcm/client.py | cyberbudy/aiofcm | 30e66b872aa2e1fc43ef4884fb84ba23b91879c5 | [
"Apache-2.0"
] | 30 | 2017-05-11T08:21:45.000Z | 2021-11-20T13:52:13.000Z | aiofcm/client.py | cyberbudy/aiofcm | 30e66b872aa2e1fc43ef4884fb84ba23b91879c5 | [
"Apache-2.0"
] | 12 | 2017-05-22T16:42:03.000Z | 2021-08-09T11:11:47.000Z | aiofcm/client.py | cyberbudy/aiofcm | 30e66b872aa2e1fc43ef4884fb84ba23b91879c5 | [
"Apache-2.0"
] | 16 | 2017-05-22T11:30:55.000Z | 2021-11-11T09:48:04.000Z | import asyncio
from typing import Optional, NoReturn
from aiofcm.connection import FCMConnectionPool
from aiofcm.common import Message, MessageResponse
from aiofcm.logging import logger
class FCM:
    """High-level asynchronous Firebase Cloud Messaging client.

    Wraps an FCMConnectionPool and logs an error for every message that
    FCM reports as unsuccessful.
    """
    def __init__(self, sender_id, api_key, max_connections=10, loop=None):
        # type: (int, str, int, Optional[asyncio.AbstractEventLoop]) -> None
        self.pool = FCMConnectionPool(sender_id, api_key, max_connections, loop)
    async def send_message(self, message: Message) -> MessageResponse:
        """Send one message through the pool and return its response.

        A non-successful status is logged (with the optional description)
        but the response is returned to the caller either way.
        """
        response = await self.pool.send_message(message)
        if not response.is_successful:
            msg = 'Status of message %s is %s' %\
                  (message.message_id, response.status)
            if response.description:
                msg += ' (%s)' % response.description
            logger.error(msg)
        return response
| 37.217391 | 80 | 0.679907 | import asyncio
from typing import Optional, NoReturn
from aiofcm.connection import FCMConnectionPool
from aiofcm.common import Message, MessageResponse
from aiofcm.logging import logger
class FCM:
def __init__(self, sender_id, api_key, max_connections=10, loop=None):
self.pool = FCMConnectionPool(sender_id, api_key, max_connections, loop)
async def send_message(self, message: Message) -> MessageResponse:
response = await self.pool.send_message(message)
if not response.is_successful:
msg = 'Status of message %s is %s' %\
(message.message_id, response.status)
if response.description:
msg += ' (%s)' % response.description
logger.error(msg)
return response
| true | true |
1c4751b7582b662927b44f9a171203401afd2ce3 | 36,054 | py | Python | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _mxnet_utils
from turicreate import extensions as _extensions
from .. import _pre_trained_models
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
    dataset, feature, target):
    """
    Performs some sanity checks on the SFrame provided as input to
    `turicreate.drawing_classifier.create` and raises a ToolkitError
    if something in the dataset is missing or wrong.

    Parameters
    ----------
    dataset : SFrame
        The candidate training/validation data.
    feature : str
        Name of the column expected to hold drawings (tc.Image bitmaps or
        stroke-based drawings stored as lists of strokes).
    target : str
        Name of the label column (str or int values).
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    # Both columns must exist before any type checks.
    if feature not in dataset.column_names():
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in dataset.column_names():
        raise _ToolkitError("Target column '%s' does not exist" % target)
    # Drawings are either rasterized images or stroke lists.
    if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list):
        raise _ToolkitError("Feature column must contain images"
            + " or stroke-based drawings encoded as lists of strokes"
            + " where each stroke is a list of points and"
            + " each point is stored as a dictionary")
    # Labels must be hashable class identifiers (str or int).
    if dataset[target].dtype != int and dataset[target].dtype != str:
        raise _ToolkitError("Target column contains " + str(dataset[target].dtype)
            + " but it must contain strings or integers to represent"
            + " labels for drawings.")
    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.
    Parameters
    ----------
    dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.
    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.
    feature : string optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `dataset` named "drawing" should be used as the
        feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.
    validation_set : SFrame optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.
    warm_start : string optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the
                            Quick, Draw! dataset.
    batch_size: int optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.
    max_iterations : int optional
        The maximum number of allowed passes through the data. More passes over
        the data can result in a more accurately trained model.
    verbose : bool optional
        If True, print progress updates and model details.
    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.
    See Also
    --------
    DrawingClassifier
    Examples
    --------
    .. sourcecode:: python
        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)
        # Make predictions on the training set and as column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    start_time = _time.time()
    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576
    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)
    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)
    # Validate numeric hyperparameters before doing any heavy work.
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")
    # Stroke-based input is rasterized to bitmaps by the native extension.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset
    iteration = 0
    classes = dataset[target].unique()
    classes = sorted(classes)
    # Map class label -> integer index used by the loss/metrics.
    class_to_index = {name: index for index, name in enumerate(classes)}
    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    # Resolve `validation_set`: it may be an SFrame, the string 'auto', or None.
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)
    # One multi-epoch iterator for training, plus single-pass iterators used
    # only to measure accuracy on the training and validation sets.
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                                             feature_column=feature,
                                                             target_column=target,
                                                             class_to_index=class_to_index,
                                                             load_labels=True,
                                                             shuffle=True,
                                                             iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)
    # Progress table with an extra column when a validation set exists.
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)
    # Build the network and (optionally) warm-start from a pretrained model.
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)
    if warm_start is not None:
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')
    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()
    def get_data_and_label_from_batch(batch):
        # Split one (possibly padded) batch across the available devices,
        # dropping the padding rows first so metrics are not skewed.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data  = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label
    def compute_accuracy(accuracy_metric, batch_loader):
        # One full pass over `batch_loader`, accumulating into the metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time
        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {  "iteration": iteration,
                            "train_loss": float(train_loss),
                            "train_accuracy": train_accuracy.get()[1],
                            "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)
    # Bundle everything the proxy model needs (weights, label mapping and
    # the final training statistics).
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
"""
A trained model that is ready to use for classification, and to be
exported to Core ML.
This model should not be constructed directly.
"""
_PYTHON_DRAWING_CLASSIFIER_VERSION = 1
    def __init__(self, state):
        # `state` is the dict assembled by `create()`; wrapping it in a
        # PythonProxy exposes its keys as attributes (self.classes, ...).
        self.__proxy__ = _PythonProxy(state)
    @classmethod
    def _native_name(cls):
        # Identifier under which this model type is registered for save/load.
        return "drawing_classifier"
    def _get_native_state(self):
        # Serialize the mxnet network parameters into a host-memory form so
        # the model state can be persisted without a live mxnet context.
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state
    def _get_version(self):
        # Serialization format version, checked on load.
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION
    @classmethod
    def _load_version(cls, state, version):
        """Reconstruct a DrawingClassifier from serialized `state`.

        Rebuilds the mxnet network for the saved number of classes and
        restores its parameters on the available devices.
        """
        _tkutl._model_version_check(version,
                                    cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
            )
        state['_model'] = net
        # For a model trained on integer classes, when saved and loaded back,
        # the classes are loaded as floats. The following if statement casts
        # the loaded "float" classes back to int.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)
    def __str__(self):
        """
        Return a string description of the model to the ``print`` method.

        Returns
        -------
        out : string
            A description of the DrawingClassifier.
        """
        # Delegate to __repr__ so print() and the REPL show the same text.
        return self.__repr__()
    def __repr__(self):
        """
        Returns a string description of the model when the model name is
        entered in the terminal.
        """
        # Fixed column width for the two-column summary table.
        width = 40
        sections, section_titles = self._get_summary_struct()
        out = _tkutl._toolkit_repr_print(self, sections, section_titles,
                                         width=width)
        return out
    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where
        relevant) the schema of the training data, description of the training
        data, training statistics, and model hyperparameters.

        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
            Each section is a list.
            Each item in a section list is a tuple of the form:
              ('<label>','<field>')
        section_titles: list
            A list of section titles.
            The order matches that of the 'sections' object.
        """
        # Each tuple pairs a human-readable label with the proxy field name.
        model_fields = [
            ('Number of classes', 'num_classes'),
            ('Feature column', 'feature'),
            ('Target column', 'target')
        ]
        training_fields = [
            ('Training Iterations', 'max_iterations'),
            ('Training Accuracy', 'training_accuracy'),
            ('Validation Accuracy', 'validation_accuracy'),
            ('Training Time', 'training_time'),
            ('Number of Examples', 'num_examples'),
            ('Batch Size', 'batch_size'),
            ('Final Loss (specific to model)', 'training_loss')
        ]
        section_titles = ['Schema', 'Training summary']
        return([model_fields, training_fields], section_titles)
    def export_coreml(self, filename, verbose=False):
        """
        Save the model in Core ML format. The Core ML model takes a grayscale
        drawing of fixed size as input and produces two outputs:
        `classLabel` and `labelProbabilities`.

        The first one, `classLabel` is an integer or string (depending on the
        classes the model was trained on) to store the label of the top
        prediction by the model.

        The second one, `labelProbabilities`, is a dictionary with all the
        class labels in the dataset as the keys, and their respective
        probabilities as the values.

        See Also
        --------
        save

        Parameters
        ----------
        filename : string
            The path of the file where we want to save the Core ML model.
        verbose : bool optional
            If True, prints export progress.

        Examples
        --------
        >>> model.export_coreml('drawing_classifier.mlmodel')
        """
        import mxnet as _mx
        from .._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools
        # Rebuild the network symbolically with a fixed 1x1x28x28 input so
        # the converter can trace it.
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
                                   shape=image_shape, dtype=_np.float32)
        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)
        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()
        # Copy the trained gluon parameters into the symbolic module.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()
        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)
        # Convert to Core ML; pixel values are rescaled from [0, 255] to [0, 1].
        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                class_labels=self.classes,
                                input_shape=[(self.feature, image_shape)],
                                builder=None, verbose=verbose,
                                preprocessor_args={
                                    'image_input_names': [self.feature],
                                    'image_scale': 1.0/255
                                })
        # Rename the probability output to "<target>Probabilities" in the spec.
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        # Human-readable metadata shown in Xcode.
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)
def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                verbose=True):
    """
    Predict with probabilities. The core prediction part that both
    `evaluate` and `predict` share.

    Returns an SFrame with two columns. The column named self.target
    contains the label predicted for each row; the "probability" column
    contains the per-class probability vector for that row.
    """
    import mxnet as _mx
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter

    # Stroke-based drawings must be rasterized into bitmaps first.
    is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, self.feature) if is_stroke_input else input_dataset

    batch_size = self.batch_size if batch_size is None else batch_size
    loader = _SFrameClassifierIter(dataset, batch_size,
                                   class_to_index=self._class_to_index,
                                   feature_column=self.feature,
                                   target_column=self.target,
                                   load_labels=False,
                                   shuffle=False,
                                   iterations=1)

    dataset_size = len(dataset)
    ctx = _mxnet_utils.get_mxnet_context()

    index = 0        # number of rows predicted so far
    last_time = 0    # timestamp of the last progress line
    done = False

    from turicreate import SArrayBuilder
    from array import array

    classes = self.classes
    all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
    all_probabilities_builder = SArrayBuilder(dtype=array)

    for batch in loader:
        # Trim the padding rows the loader appends to fill the final batch.
        if batch.pad is not None:
            batch_data = _mx.nd.slice_axis(batch.data[0],
                                           axis=0, begin=0,
                                           end=batch_size - batch.pad)
        else:
            batch_data = batch.data[0]

        num_devices = min(batch_data.shape[0], len(ctx))
        split_data = _mx.gluon.utils.split_and_load(
            batch_data, ctx_list=ctx[:num_devices], even_split=False)

        for data in split_data:
            z = self._model(data).asnumpy()
            predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
            split_length = z.shape[0]
            all_predicted_builder.append_multiple(predicted)
            all_probabilities_builder.append_multiple(z.tolist())
            index += split_length
            # BUGFIX: the old check `index == dataset_size - 1` could never
            # hold — `index` counts processed rows, so it reaches exactly
            # `dataset_size` after the last split — and the final progress
            # line was therefore never printed.
            if index >= dataset_size:
                done = True

            cur_time = _time.time()
            # Do not print progress if only a few samples are predicted;
            # otherwise print at most once every 10 seconds, plus a final line.
            if verbose and (dataset_size >= 5
                            and cur_time > last_time + 10 or done):
                print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                    cur_n = index,
                    max_n = dataset_size,
                    width = len(str(dataset_size))))
                last_time = cur_time

    return (_tc.SFrame({self.target: all_predicted_builder.close(),
                        'probability': all_probabilities_builder.close()}))
def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
    """
    Evaluate the model by predicting on `dataset` and comparing the
    predictions against the ground-truth target column.

    Parameters
    ----------
    dataset : SFrame
        New observations; must contain the training feature and target
        columns. Extra columns are ignored.
    metric : str, optional
        'auto' (all metrics) or one of 'accuracy', 'auc', 'precision',
        'recall', 'f1_score', 'confusion_matrix', 'roc_curve'.
    batch_size : int, optional
        Prediction batch size.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : dict
        Maps each computed metric name to its score (scalar metrics) or
        SFrame (confusion matrix / ROC curve).

    See Also
    ----------
    create, predict
    """
    if self.target not in dataset.column_names():
        raise _ToolkitError("Must provide ground truth column, '"
                            + self.target + "' in the evaluation dataset.")

    predicted = self._predict_with_probabilities(dataset, batch_size, verbose)

    avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                     'f1_score', 'confusion_matrix', 'roc_curve']
    _tkutl._check_categorical_option_type(
        'metric', metric, avail_metrics + ['auto'])
    requested = avail_metrics if metric == 'auto' else [metric]

    truth = dataset[self.target]
    labels = predicted[self.target]
    scores = predicted['probability']
    index_map = self._class_to_index

    # One lazy calculator per metric, so only the requested ones ever run.
    calculators = {
        'accuracy': lambda: _evaluation.accuracy(truth, labels),
        'auc': lambda: _evaluation.auc(truth, scores, index_map=index_map),
        'precision': lambda: _evaluation.precision(truth, labels),
        'recall': lambda: _evaluation.recall(truth, labels),
        'f1_score': lambda: _evaluation.f1_score(truth, labels),
        'confusion_matrix': lambda: _evaluation.confusion_matrix(truth, labels),
        'roc_curve': lambda: _evaluation.roc_curve(truth, scores,
                                                   index_map=index_map),
    }
    return {name: calculators[name]()
            for name in avail_metrics if name in requested}
def predict_topk(self, dataset, output_type="probability", k=3,
                 batch_size=None):
    """
    Return the top-k predictions for each drawing in ``dataset``.

    The result is an SFrame with three columns: `id` (input row number),
    `class`, and either `probability` or `rank` depending on
    ``output_type``.

    Parameters
    ----------
    dataset : SFrame | SArray | turicreate.Image | list
        Drawings to classify; an SFrame must contain the training feature
        column (target column not required).
    output_type : {'probability', 'rank'}, optional
        Value reported alongside each predicted class.
    k : int, optional
        Number of classes returned per input example.
    batch_size : int, optional
        Prediction batch size.

    Returns
    -------
    out : SFrame
        One row per (example, top-k class) pair.

    See Also
    --------
    predict, evaluate
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "rank"])
    if not isinstance(k, int):
        raise TypeError("'k' must be an integer >= 1")
    if k <= 0:
        raise ValueError("'k' must be >= 1")
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")

    prob_vector = self.predict(
        dataset, output_type='probability_vector', batch_size=batch_size)
    classes = self.classes

    # Build a per-row extractor for the k highest-probability classes,
    # ordered best-first.
    if output_type == 'probability':
        def top_k(p):
            return [{'class': classes[i], 'probability': p[i]}
                    for i in reversed(_np.argsort(p)[-k:])]
    else:
        assert(output_type == 'rank')
        def top_k(p):
            return [{'class': classes[i], 'rank': rank}
                    for rank, i in enumerate(reversed(_np.argsort(p)[-k:]))]

    # Explode each row's list of dicts into one output row per entry.
    stacked = _tc.SFrame({'X': prob_vector.apply(top_k)})
    stacked = stacked.add_row_number()
    stacked = stacked.stack('X', new_column_name='X')
    return stacked.unpack('X', column_name_prefix='')
def predict(self, data, output_type='class', batch_size=None, verbose=True):
    """
    Predict on an SFrame or SArray of drawings, or on a single drawing.

    Parameters
    ----------
    data : SFrame | SArray | tc.Image | list
        Drawing(s) to classify. An SFrame must contain the training
        feature column; a single drawing may be a tc.Image (bitmap) or a
        list (stroke-based).
    output_type : {'probability', 'class', 'probability_vector'}, optional
        - 'class': label with maximum probability.
        - 'probability': probability of the True class (binary only).
        - 'probability_vector': per-class probability vector ordered like
          ``self.classes``.
    batch_size : int, optional
        Prediction batch size.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : SArray
        One prediction per input drawing.

    See Also
    --------
    evaluate
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "class", "probability_vector"])

    # Normalize every accepted input form into an SFrame up front so the
    # core prediction routine only ever sees SFrames.
    if isinstance(data, _tc.SFrame):
        frame = data
    elif isinstance(data, _tc.SArray):
        frame = _tc.SFrame({self.feature: data})
    else:
        # Single drawing (tc.Image or stroke list) -> one-row SFrame.
        frame = _tc.SFrame({self.feature: [data]})

    predicted = self._predict_with_probabilities(frame, batch_size, verbose)

    if output_type == "class":
        return predicted[self.target]
    if output_type == "probability":
        class_to_index = self._class_to_index
        target = self.target
        return predicted.apply(
            lambda row: row["probability"][class_to_index[row[target]]])
    assert (output_type == "probability_vector")
    return predicted["probability"]
| 41.632794 | 123 | 0.608837 |
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _mxnet_utils
from turicreate import extensions as _extensions
from .. import _pre_trained_models
# Rasterized drawings are 28x28 grayscale bitmaps.
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
# Fraction of rows kept for training when validation_set='auto' splits the data.
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
        dataset, feature, target):
    """
    Validate a drawing-classifier input SFrame.

    Checks that `dataset` is a non-empty SFrame, that the `feature` and
    `target` columns exist, that the feature column holds images or
    stroke-based drawings (lists of strokes), and that the target column
    holds str or int labels. Raises ToolkitError on any violation.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    if feature not in dataset.column_names():
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in dataset.column_names():
        raise _ToolkitError("Target column '%s' does not exist" % target)
    # A drawing is either a rasterized image or a list of strokes, where each
    # stroke is a list of point dictionaries.
    if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list):
        raise _ToolkitError("Feature column must contain images"
            + " or stroke-based drawings encoded as lists of strokes"
            + " where each stroke is a list of points and"
            + " each point is stored as a dictionary")
    if dataset[target].dtype != int and dataset[target].dtype != str:
        raise _ToolkitError("Target column contains " + str(dataset[target].dtype)
            + " but it must contain strings or integers to represent"
            + " labels for drawings.")
    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Train a DrawingClassifier on `input_dataset`.

    Parameters: `target` is the label column; `feature` is the drawing
    column (auto-detected when None); `validation_set` is an SFrame, None,
    or 'auto' (random 5% split when the dataset has >= 100 rows);
    `warm_start` selects a pre-trained model to initialize from (None
    trains from scratch). Returns a trained DrawingClassifier.
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter

    start_time = _time.time()

    # ---- Input validation -------------------------------------------------
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)
    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    # Stroke-based input is rasterized into bitmaps before training.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0
    # Sorted class list fixes the label -> index mapping used everywhere.
    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    # ---- Validation set handling -----------------------------------------
    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                # Too few rows to split off a meaningful validation set.
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    # ---- Data loaders -----------------------------------------------------
    # Main loader iterates max_iterations epochs; the other two make a single
    # pass to measure train/validation accuracy after each epoch.
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)

    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    # ---- Model setup ------------------------------------------------------
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        # Overwrite the Xavier init with pre-trained weights (the final layer
        # may differ in size, hence allow_missing=True).
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        # Strip loader padding, then shard data/labels across contexts.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data  = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data  = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data  = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        # Single full pass over the loader, accumulating into the metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    # ---- Training loop ----------------------------------------------------
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                loss = softmax_cross_entropy(z, y)
                loss.backward()
        trainer.step(train_batch.data[0].shape[0])
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        # End of an epoch: refresh accuracies and print a progress row.
        if train_batch.iteration > iteration:
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = { "iteration": iteration,
                           "train_loss": float(train_loss),
                           "train_accuracy": train_accuracy.get()[1],
                           "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    # ---- Package the trained model ---------------------------------------
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
    """
    Model object for classifying hand-drawn sketches (bitmaps or
    stroke-based drawings). Instances are produced by `create`; use
    `predict`, `predict_topk`, and `evaluate` for inference, and
    `export_coreml` to export the model to Core ML.
    """

    # Serialization format version, checked in _load_version.
    _PYTHON_DRAWING_CLASSIFIER_VERSION = 1

    def __init__(self, state):
        # All model state (gluon network, class list, training stats, ...)
        # lives behind a PythonProxy so it can be saved/loaded uniformly.
        self.__proxy__ = _PythonProxy(state)

    @classmethod
    def _native_name(cls):
        """Name under which this model type is registered with the toolkit."""
        return "drawing_classifier"

    def _get_native_state(self):
        """Return a serializable state dict (mxnet weights -> plain arrays)."""
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state

    def _get_version(self):
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION

    @classmethod
    def _load_version(cls, state, version):
        """Rebuild a model from a state dict written by _get_native_state."""
        _tkutl._model_version_check(version,
                                    cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
        )
        state['_model'] = net
        # Older saves may carry float class labels; normalize them to int.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        """Printable summary table of schema and training statistics."""
        width = 40
        sections, section_titles = self._get_summary_struct()
        out = _tkutl._toolkit_repr_print(self, sections, section_titles,
                                         width=width)
        return out

    def _get_summary_struct(self):
        """Return ([list of (label, field) sections], [section titles])."""
        model_fields = [
            ('Number of classes', 'num_classes'),
            ('Feature column', 'feature'),
            ('Target column', 'target')
        ]
        training_fields = [
            ('Training Iterations', 'max_iterations'),
            ('Training Accuracy', 'training_accuracy'),
            ('Validation Accuracy', 'validation_accuracy'),
            ('Training Time', 'training_time'),
            ('Number of Examples', 'num_examples'),
            ('Batch Size', 'batch_size'),
            ('Final Loss (specific to model)', 'training_loss')
        ]
        section_titles = ['Schema', 'Training summary']
        return([model_fields, training_fields], section_titles)

    def export_coreml(self, filename, verbose=False):
        """
        Save the model in Core ML format. The exported model takes a fixed
        size grayscale drawing as input and outputs `classLabel` (top
        prediction) and a `<target>Probabilities` dictionary.
        """
        import mxnet as _mx
        from .._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools
        # Export uses a single 1-channel BITMAP_WIDTH x BITMAP_HEIGHT input.
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
                                   shape=image_shape, dtype=_np.float32)
        # Trace a copy of the gluon net symbolically for the converter.
        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)
        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()
        # Replace the fresh init with the trained gluon weights.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()
        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)
        # image_scale maps 0-255 pixels into the 0-1 range the net expects.
        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                                class_labels=self.classes,
                                                input_shape=[(self.feature, image_shape)],
                                                builder=None, verbose=verbose,
                                                preprocessor_args={
                                                    'image_input_names': [self.feature],
                                                    'image_scale': 1.0/255
                                                })
        # Rename the probability output to "<target>Probabilities".
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        # Attach human-readable metadata and write the spec to disk.
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)

    def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                    verbose=True):
        """
        Shared core of `predict` and `evaluate`: returns an SFrame with the
        predicted label column (named self.target) and a "probability"
        column holding per-class probability vectors.
        """
        import mxnet as _mx
        from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
        # Stroke-based drawings are rasterized into bitmaps first.
        is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
        dataset = _extensions._drawing_classifier_prepare_data(
            input_dataset, self.feature) if is_stroke_input else input_dataset
        batch_size = self.batch_size if batch_size is None else batch_size
        loader = _SFrameClassifierIter(dataset, batch_size,
                                       class_to_index=self._class_to_index,
                                       feature_column=self.feature,
                                       target_column=self.target,
                                       load_labels=False,
                                       shuffle=False,
                                       iterations=1)
        dataset_size = len(dataset)
        ctx = _mxnet_utils.get_mxnet_context()
        index = 0
        last_time = 0
        done = False
        from turicreate import SArrayBuilder
        from array import array
        classes = self.classes
        all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
        all_probabilities_builder = SArrayBuilder(dtype=array)
        for batch in loader:
            # Trim the padding the loader appends to fill the final batch.
            if batch.pad is not None:
                size = batch_size - batch.pad
                batch_data = _mx.nd.slice_axis(batch.data[0],
                                               axis=0, begin=0, end=size)
            else:
                batch_data = batch.data[0]
                size = batch_size
            num_devices = min(batch_data.shape[0], len(ctx))
            split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
            for data in split_data:
                z = self._model(data).asnumpy()
                predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
                split_length = z.shape[0]
                all_predicted_builder.append_multiple(predicted)
                all_probabilities_builder.append_multiple(z.tolist())
                index += split_length
                # NOTE(review): `index` counts processed rows and reaches
                # exactly `dataset_size` after the final split, so this
                # condition looks like it can never be true — verify.
                if index == dataset_size - 1:
                    done = True
                cur_time = _time.time()
                # Throttle progress printing; skip it for tiny datasets.
                if verbose and (dataset_size >= 5
                                and cur_time > last_time + 10 or done):
                    print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                        cur_n = index + 1,
                        max_n = dataset_size,
                        width = len(str(dataset_size))))
                    last_time = cur_time
        return (_tc.SFrame({self.target: all_predicted_builder.close(),
                            'probability': all_probabilities_builder.close()}))

    def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
        """
        Evaluate the model on `dataset` (must contain the target column).
        `metric` is 'auto' (all metrics) or one of 'accuracy', 'auc',
        'precision', 'recall', 'f1_score', 'confusion_matrix', 'roc_curve'.
        Returns a dict mapping metric names to results.
        """
        if self.target not in dataset.column_names():
            raise _ToolkitError("Must provide ground truth column, '"
                                + self.target + "' in the evaluation dataset.")
        predicted = self._predict_with_probabilities(dataset, batch_size, verbose)
        avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                         'f1_score', 'confusion_matrix', 'roc_curve']
        _tkutl._check_categorical_option_type(
            'metric', metric, avail_metrics + ['auto'])
        metrics = avail_metrics if metric == 'auto' else [metric]
        ret = {}
        if 'accuracy' in metrics:
            ret['accuracy'] = _evaluation.accuracy(
                dataset[self.target], predicted[self.target])
        if 'auc' in metrics:
            ret['auc'] = _evaluation.auc(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        if 'precision' in metrics:
            ret['precision'] = _evaluation.precision(
                dataset[self.target], predicted[self.target])
        if 'recall' in metrics:
            ret['recall'] = _evaluation.recall(
                dataset[self.target], predicted[self.target])
        if 'f1_score' in metrics:
            ret['f1_score'] = _evaluation.f1_score(
                dataset[self.target], predicted[self.target])
        if 'confusion_matrix' in metrics:
            ret['confusion_matrix'] = _evaluation.confusion_matrix(
                dataset[self.target], predicted[self.target])
        if 'roc_curve' in metrics:
            ret['roc_curve'] = _evaluation.roc_curve(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        return ret

    def predict_topk(self, dataset, output_type="probability", k=3,
                     batch_size=None):
        """
        Return the top-k predictions per drawing as an SFrame with columns
        `id`, `class`, and `probability` or `rank` (per `output_type`).
        """
        _tkutl._check_categorical_option_type("output_type", output_type,
                                              ["probability", "rank"])
        if not isinstance(k, int):
            raise TypeError("'k' must be an integer >= 1")
        if k <= 0:
            raise ValueError("'k' must be >= 1")
        if batch_size is not None and not isinstance(batch_size, int):
            raise TypeError("'batch_size' must be an integer >= 1")
        if batch_size is not None and batch_size < 1:
            raise ValueError("'batch_size' must be >= 1")
        prob_vector = self.predict(
            dataset, output_type='probability_vector', batch_size=batch_size)
        classes = self.classes
        # argsort ascending, take last k, reverse -> best-first ordering.
        if output_type == 'probability':
            results = prob_vector.apply(lambda p: [
                {'class': classes[i], 'probability': p[i]}
                for i in reversed(_np.argsort(p)[-k:])]
            )
        else:
            assert(output_type == 'rank')
            results = prob_vector.apply(lambda p: [
                {'class': classes[index], 'rank': rank}
                for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))]
            )
        # Explode each row's list of dicts into one output row per entry.
        results = _tc.SFrame({'X': results})
        results = results.add_row_number()
        results = results.stack('X', new_column_name='X')
        results = results.unpack('X', column_name_prefix='')
        return results

    def predict(self, data, output_type='class', batch_size=None, verbose=True):
        """
        Predict on an SFrame/SArray of drawings or on a single drawing
        (tc.Image bitmap or stroke list). `output_type` is 'class',
        'probability' (binary only), or 'probability_vector'.
        """
        _tkutl._check_categorical_option_type("output_type", output_type,
                                              ["probability", "class", "probability_vector"])
        # Wrap bare SArrays / single drawings into a one-column SFrame.
        if isinstance(data, _tc.SArray):
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: data
                }),
                batch_size,
                verbose
            )
        elif isinstance(data, _tc.SFrame):
            predicted = self._predict_with_probabilities(data, batch_size, verbose)
        else:
            # Single input drawing.
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: [data]
                }),
                batch_size,
                verbose
            )
        if output_type == "class":
            return predicted[self.target]
        elif output_type == "probability":
            _class_to_index = self._class_to_index
            target = self.target
            return predicted.apply(
                lambda row: row["probability"][_class_to_index[row[target]]])
        else:
            assert (output_type == "probability_vector")
            return predicted["probability"]
| true | true |
1c47529775227539b203847b8de750e8bd66423a | 407 | py | Python | cont/contapp/models.py | Chuox/Contador_Palabras | 2be98392351536416baa38c90fc62950138d84f1 | [
"MIT"
] | null | null | null | cont/contapp/models.py | Chuox/Contador_Palabras | 2be98392351536416baa38c90fc62950138d84f1 | [
"MIT"
] | null | null | null | cont/contapp/models.py | Chuox/Contador_Palabras | 2be98392351536416baa38c90fc62950138d84f1 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
# Create your models here.
class Palabras(models.Model):
url = models.CharField(max_length=99999,default="https://es.wikipedia.org/")
texto = models.CharField(max_length=9999999,default="")
def __str__(self):
return self.url
def get_absolute_url(self):
return reverse('count-detail', kwargs={'pk': self.pk}) | 31.307692 | 80 | 0.702703 | from django.db import models
from django.urls import reverse
class Palabras(models.Model):
    """A scraped page whose words get counted: source URL plus raw text."""

    # Source page URL (defaults to the Spanish Wikipedia front page).
    url = models.CharField(max_length=99999,default="https://es.wikipedia.org/")
    # Raw text extracted from the page.
    texto = models.CharField(max_length=9999999,default="")

    def __str__(self):
        # Identify records by their URL in admin/shell output.
        return self.url

    def get_absolute_url(self):
        # Canonical detail-view route for this record, resolved by primary key.
        return reverse('count-detail', kwargs={'pk': self.pk})
1c4752b75cce49cce05e2ea439f39e239799fab9 | 2,740 | py | Python | mvmv/mvmv.py | movermeyer/mvmv | 23c1c4202b6fb0ef08d6c07975107dcec87d7208 | [
"MIT"
] | 1 | 2019-01-26T16:35:31.000Z | 2019-01-26T16:35:31.000Z | mvmv/mvmv.py | movermeyer/mvmv | 23c1c4202b6fb0ef08d6c07975107dcec87d7208 | [
"MIT"
] | 5 | 2015-01-22T23:24:05.000Z | 2015-01-25T04:49:03.000Z | mvmv/mvmv.py | movermeyer/mvmv | 23c1c4202b6fb0ef08d6c07975107dcec87d7208 | [
"MIT"
] | 3 | 2015-02-25T17:51:41.000Z | 2018-03-04T20:29:59.000Z | import codecs
import mimetypes
import os
import re
import sqlite3
from fuzzywuzzy import fuzz
# common words in movies that we don't want to search the database for
# (case-sensitive membership test, applied to the cleaned query words)
common_words = [
    "The",
    "Them",
    "A",
    "An",
    "In",
]
# blacklist of common garbage that fills up movie names; each entry is a
# regex fragment, combined case-insensitively into bl_re below
blacklist = [
    "BluRay",
    "\d{3,4}p",
    "(HD|DVD|BR)Rip",
    "x\d{3}",
    "XViD(-.*)?",
    "AC3-EVO",
]
# compile the blacklist into a single alternation regex; each tag must be
# followed by whitespace or end-of-string so partial words are not stripped
bl_re = re.compile("(" + "|".join(blacklist) + ")(\s|$)", re.IGNORECASE)
# Fuzzy-match a cleaned query against the "movies" full-text-search table.
def search(query, cursor):
    """Return the best-matching movie title in the database for *query*.

    The query (typically a file name) is normalised: dots become spaces and
    common release tags (resolution, rip type, codec, ...) are stripped with
    the module-level blacklist regex.  A four-digit year, if present, is used
    to narrow the full-text search.

    :param query: raw movie name, e.g. ``"Some.Movie.2004.720p.BluRay"``
    :param cursor: sqlite3 cursor over an FTS table named ``movies``
    :return: the stored title judged closest to the query, or the cleaned
        query itself when nothing in the database scores better
    """
    # Normalise separators and strip release-tag noise.
    query = query.replace(".", " ")
    query = bl_re.sub("", query)
    year_match = re.search(r"(19|20)\d{2}", query)
    # Bug fix: previously a missing year left ``year`` as None, so the FTS
    # query became "word None".  Use an empty string instead.
    year = year_match.group(0) if year_match else ""
    # Find the first relevant word to seed the full-text search.
    word = ""
    for item in query.split(" "):
        if item not in common_words and len(item) > 3:
            word = item.replace("-", " ")
            break
    cursor.execute("SELECT * FROM movies WHERE movies MATCH ?",
                   ["%s %s" % (word, year)])
    ratio = 0
    best = query
    if year:
        best = best.replace(year, "")
    best = best.strip()
    for item in cursor:
        current = fuzz.ratio(item[0], query)
        # Penalise candidate titles containing words absent from the query.
        for title_word in item[0].split():
            if title_word not in query:
                current -= 10
        if item[0] in query and len(item[0].split()) > 1:
            # An exact multi-word substring match wins outright.
            ratio = 100
            best = item[0]
        elif current > ratio:
            ratio = current
            best = item[0]
    return best
def is_valid_file(filename, excludes):
    """Return True when *filename* has a video MIME type and matches none of
    the compiled regex patterns in *excludes*."""
    mime = str(mimetypes.guess_type(filename)[0])
    if not mime.startswith('video/'):
        return False
    return not any(bool(pattern.match(filename)) for pattern in excludes)
def get_movies_list(dirname, excludes=None):
    """Walk *dirname* recursively and collect (directory, filename) pairs
    for every video file not matching an exclude pattern."""
    if excludes is None:
        excludes = []
    found = []
    for directory, _, filenames in os.walk(dirname):
        # Skip whole directories whose path matches any exclude pattern.
        if any(pattern.match(directory) for pattern in excludes):
            continue
        for name in filenames:
            if is_valid_file(name, excludes):
                found.append((directory, name))
    return found
def movemovie(src, dst, cursor):
    """Rename one movie file into *dst*, titled by the database lookup.

    :param src: ``(directory, filename)`` pair as produced by get_movies_list
    :param dst: destination directory
    :param cursor: sqlite3 cursor, passed through to search()
    """
    filename, extension = os.path.splitext(src[1])
    # Bug fix: build the destination with os.path.join instead of
    # "%s/%s%s" string formatting, so the path is correct on every
    # platform and never double-slashed.
    os.rename(os.path.join(src[0], src[1]),
              os.path.join(dst, search(filename, cursor) + extension))
def movemovies(dirname, dst, cursor, excludes=None):
    """Locate every movie under *dirname* and move each one into *dst*."""
    for entry in get_movies_list(dirname, excludes):
        movemovie(entry, dst, cursor)
if __name__ == "__main__":
    # Ad-hoc CLI: look up the title given as the first command-line argument
    # against the local movies.db and print the best match.
    conn = sqlite3.connect("movies.db")
    cursor = conn.cursor()
    import sys
    print(search(sys.argv[1], cursor))
    conn.close()
| 26.346154 | 80 | 0.55365 | import codecs
import mimetypes
import os
import re
import sqlite3
from fuzzywuzzy import fuzz
common_words = [
"The",
"Them",
"A",
"An",
"In",
]
# blacklist of common garbage that fills up movie names
blacklist = [
"BluRay",
"\d{3,4}p",
"(HD|DVD|BR)Rip",
"x\d{3}",
"XViD(-.*)?",
"AC3-EVO",
]
# compile the blacklist into a regex
bl_re = re.compile("(" + "|".join(blacklist) + ")(\s|$)", re.IGNORECASE)
# Setup the sqlite database
def search(query, cursor):
# remove all instancer of 'WORD ' for WORD in blacklist
query = query.replace(".", " ")
query = bl_re.sub("", query)
year = re.search("(19|20)\d{2}", query)
if year:
year = year.group(0)
# Find the first relevant word
word = ""
for item in query.split(" "):
if item not in common_words and len(item) > 3:
word = item.replace("-", " ")
break
cursor.execute("SELECT * FROM movies WHERE movies MATCH ?",
["%s %s" % (word, year)])
ratio = 0
best = query
if year:
best = best.replace(year, "")
best = best.strip()
for item in cursor:
current = fuzz.ratio(item[0], query)
for word in item[0].split():
if word not in query:
current -= 10
if item[0] in query and len(item[0].split()) > 1:
ratio = 100
best = item[0]
elif current > ratio:
ratio = current
best = item[0]
return best
def is_valid_file(filename, excludes):
return str(mimetypes.guess_type(filename)[0]).find('video/') == 0 and \
not any(map(lambda x: bool(x.match(filename)), excludes))
def get_movies_list(dirname, excludes=None):
if excludes is None:
excludes = []
movies = []
for root, _, files in os.walk(dirname):
if any(map(lambda x: x.match(root), excludes)):
continue
movies += [(root, mov) for mov in files if is_valid_file(mov, excludes)]
return movies
def movemovie(src, dst, cursor):
filename, extension = os.path.splitext(src[1])
os.rename(os.path.join(src[0], src[1]),
"%s/%s%s" % (dst, search(filename, cursor),
extension))
def movemovies(dirname, dst, cursor, excludes=None):
for movie in get_movies_list(dirname, excludes):
movemovie(movie, dst, cursor)
if __name__ == "__main__":
conn = sqlite3.connect("movies.db")
cursor = conn.cursor()
import sys
print(search(sys.argv[1], cursor))
conn.close()
| true | true |
1c4752ee09bf70092f224bcea3d2adc5f3dcac59 | 708 | py | Python | Switches.py | ProgrammingNerdGit/GBLS | 6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50 | [
"MIT"
] | 1 | 2020-11-04T18:50:54.000Z | 2020-11-04T18:50:54.000Z | Switches.py | ProgrammingNerdGit/GBLS | 6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50 | [
"MIT"
] | null | null | null | Switches.py | ProgrammingNerdGit/GBLS | 6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50 | [
"MIT"
] | null | null | null | class switch:
def __init__(self):
self.cases = []
self.triggered = False
def anyCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
for i in args:
if(args[i] and not self.triggered):
self.triggered = True
func()
def exclusiveCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
numOfExepts = 0
for i in args:
if(args[i] and not self.triggered):
numOfExepts += 1
if(numOfExepts == len(args)):
self.triggered = True
func()
def default(self,func):
if(not self.triggered):
func()
| 29.5 | 50 | 0.492938 | class switch:
def __init__(self):
self.cases = []
self.triggered = False
def anyCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
for i in args:
if(args[i] and not self.triggered):
self.triggered = True
func()
def exclusiveCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
numOfExepts = 0
for i in args:
if(args[i] and not self.triggered):
numOfExepts += 1
if(numOfExepts == len(args)):
self.triggered = True
func()
def default(self,func):
if(not self.triggered):
func()
| true | true |
1c4753ab0132900bf58f1a4ebd6b8e9c3f876049 | 924 | bzl | Python | tools/repositories.bzl | guibou/rules_haskell | ea0e70ace2432a490d4ab4c4e54617612466e584 | [
"Apache-2.0"
] | 222 | 2017-11-06T09:01:12.000Z | 2022-03-28T08:24:22.000Z | tools/repositories.bzl | guibou/rules_haskell | ea0e70ace2432a490d4ab4c4e54617612466e584 | [
"Apache-2.0"
] | 1,168 | 2017-11-19T07:43:13.000Z | 2022-03-31T12:40:39.000Z | tools/repositories.bzl | guibou/rules_haskell | ea0e70ace2432a490d4ab4c4e54617612466e584 | [
"Apache-2.0"
] | 94 | 2017-11-17T22:46:37.000Z | 2022-03-15T00:16:56.000Z | """Workspace rules (tools/repositories)"""
load("@rules_haskell//haskell:cabal.bzl", "stack_snapshot")
def rules_haskell_worker_dependencies(**stack_kwargs):
    """Provide all repositories that are necessary for `rules_haskell`'s tools to
    function.
    """
    # Do nothing when the workspace has already declared the repository.
    if "rules_haskell_worker_dependencies" in native.existing_rules().keys():
        return

    stack_snapshot(
        name = "rules_haskell_worker_dependencies",
        snapshot = "lts-18.0",
        packages = [
            "base",
            "bytestring",
            "filepath",
            "ghc",
            "ghc-paths",
            "microlens",
            "process",
            "profunctors-5.5.2",
            "proto-lens-0.7.0.0",
            "proto-lens-runtime-0.7.0.0",
            "text",
            "vector",
        ],
        **stack_kwargs
    )
| 29.806452 | 81 | 0.504329 |
load("@rules_haskell//haskell:cabal.bzl", "stack_snapshot")
def rules_haskell_worker_dependencies(**stack_kwargs):
excludes = native.existing_rules().keys()
if "rules_haskell_worker_dependencies" not in excludes:
stack_snapshot(
name = "rules_haskell_worker_dependencies",
packages = [
"base",
"bytestring",
"filepath",
"ghc",
"ghc-paths",
"microlens",
"process",
"profunctors-5.5.2",
"proto-lens-0.7.0.0",
"proto-lens-runtime-0.7.0.0",
"text",
"vector",
],
snapshot = "lts-18.0",
**stack_kwargs
)
| true | true |
1c4753ba6758fb3028d113543431f667163dd0f4 | 3,120 | py | Python | newproject_1/newproject_1/settings.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | null | null | null | newproject_1/newproject_1/settings.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | null | null | null | newproject_1/newproject_1/settings.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | null | null | null | # Scrapy settings for newproject_1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Identity of this Scrapy bot and the module path where spiders are found.
BOT_NAME = 'newproject_1'
SPIDER_MODULES = ['newproject_1.spiders']
NEWSPIDER_MODULE = 'newproject_1.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'newproject_1 (+http://www.yourdomain.com)'
# Obey robots.txt rules: the crawler checks and honours each site's robots.txt.
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'newproject_1.middlewares.Newproject1SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'newproject_1.middlewares.Newproject1DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'newproject_1.pipelines.Newproject1Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.05618 | 103 | 0.780769 |
BOT_NAME = 'newproject_1'
SPIDER_MODULES = ['newproject_1.spiders']
NEWSPIDER_MODULE = 'newproject_1.spiders'
ROBOTSTXT_OBEY = True
| true | true |
1c4753eff116b910c9c93958d56825d7720f1568 | 1,444 | py | Python | samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Versions_GetVersion_async]
from google.cloud import dialogflow_v2
async def sample_get_version():
# Create a client
client = dialogflow_v2.VersionsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.GetVersionRequest(
name="name_value",
)
# Make the request
response = await client.get_version(request=request)
# Handle the response
print(response)
# [END dialogflow_v2_generated_Versions_GetVersion_async]
| 31.391304 | 85 | 0.756925 |
from google.cloud import dialogflow_v2
async def sample_get_version():
client = dialogflow_v2.VersionsAsyncClient()
request = dialogflow_v2.GetVersionRequest(
name="name_value",
)
response = await client.get_version(request=request)
print(response)
| true | true |
1c4755892a095d9eed7918634a6edef5688ce027 | 1,624 | py | Python | sdks/python/http_client/v1/test/test_v1_list_searches_response.py | TariqAHassan/polyaxon | 6fc7f6a6ec49ef02d525887b6d18a893203e5b29 | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/test/test_v1_list_searches_response.py | TariqAHassan/polyaxon | 6fc7f6a6ec49ef02d525887b6d18a893203e5b29 | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/test/test_v1_list_searches_response.py | TariqAHassan/polyaxon | 6fc7f6a6ec49ef02d525887b6d18a893203e5b29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon sdk
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.14.4
Contact: contact@polyaxon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import polyaxon_sdk
from polyaxon_sdk.models.v1_list_searches_response import V1ListSearchesResponse # noqa: E501
from polyaxon_sdk.rest import ApiException
class TestV1ListSearchesResponse(unittest.TestCase):
    """V1ListSearchesResponse unit test stubs"""
    # Generated scaffold: setUp/tearDown are intentionally empty and the
    # test body is still to be filled in (see the FIXME below).
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1ListSearchesResponse(self):
        """Test V1ListSearchesResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = polyaxon_sdk.models.v1_list_searches_response.V1ListSearchesResponse() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 28.491228 | 119 | 0.738916 |
from __future__ import absolute_import
import unittest
import polyaxon_sdk
from polyaxon_sdk.models.v1_list_searches_response import V1ListSearchesResponse
from polyaxon_sdk.rest import ApiException
class TestV1ListSearchesResponse(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1ListSearchesResponse(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c475637e60225ae646c1b529f1fa216fb2c6c1a | 10,082 | py | Python | doc/source/conf.py | genomicsengland/gel-coverage | 61a671a53ac52a0b62c8aea983ced65fd0bed6cc | [
"Apache-2.0"
] | 2 | 2019-07-15T08:13:22.000Z | 2020-09-30T18:47:59.000Z | doc/source/conf.py | genomicsengland/gel-coverage | 61a671a53ac52a0b62c8aea983ced65fd0bed6cc | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | genomicsengland/gel-coverage | 61a671a53ac52a0b62c8aea983ced65fd0bed6cc | [
"Apache-2.0"
] | null | null | null | import sphinx_rtd_theme
# -*- coding: utf-8 -*-
#
# GelCoverage documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 13 14:37:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GelCoverage'
copyright = u'2016, Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
author = u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Use the Read the Docs theme; sphinx_rtd_theme is imported at file top.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'GelCoverage v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GelCoveragedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GelCoverage.tex', u'GelCoverage Documentation',
u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gelcoverage', u'GelCoverage Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GelCoverage', u'GelCoverage Documentation',
author, 'GelCoverage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.223188 | 93 | 0.706903 | import sphinx_rtd_theme
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'GelCoverage'
copyright = u'2016, Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
author = u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'GelCoverage v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GelCoveragedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GelCoverage.tex', u'GelCoverage Documentation',
u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gelcoverage', u'GelCoverage Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GelCoverage', u'GelCoverage Documentation',
author, 'GelCoverage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
1c475707181d966447b38a87fe651934c279aaa0 | 1,151 | py | Python | aiida/tools/importexport/__init__.py | aiace9/aiida-core | 09ac91654648adb684a58d5d2d7b1c11a503dae8 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-10-01T17:11:58.000Z | 2020-10-01T17:11:58.000Z | aiida/tools/importexport/__init__.py | blokhin/aiida-core | 29331b558b45ba74acf1ca633a2d8bfabc1bdd05 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2019-03-06T11:23:42.000Z | 2020-03-09T09:34:07.000Z | aiida/tools/importexport/__init__.py | blokhin/aiida-core | 29331b558b45ba74acf1ca633a2d8bfabc1bdd05 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=wildcard-import,undefined-variable
"""Provides import/export functionalities.
To see history/git blame prior to the move to aiida.tools.importexport,
explore tree: https://github.com/aiidateam/aiida-core/tree/eebef392c81e8b130834a92e1d7abf5e2e30b3ce
Functionality: <tree>/aiida/orm/importexport.py
Tests: <tree>/aiida/backends/tests/test_export_and_import.py
"""
from .dbexport import *
from .dbimport import *
from .common import *
__all__ = (dbexport.__all__ + dbimport.__all__ + common.__all__)
| 47.958333 | 99 | 0.564726 | true | true | |
1c47573535fc8458d412b298db9ec2766ec449c9 | 645 | py | Python | modules/sample/src/sample/CSV/pf.py | AsmaBRZ/rcrs-server | d67a84a17b73dd95c5553bed68b8c4c08cd5651a | [
"BSD-3-Clause"
] | null | null | null | modules/sample/src/sample/CSV/pf.py | AsmaBRZ/rcrs-server | d67a84a17b73dd95c5553bed68b8c4c08cd5651a | [
"BSD-3-Clause"
] | null | null | null | modules/sample/src/sample/CSV/pf.py | AsmaBRZ/rcrs-server | d67a84a17b73dd95c5553bed68b8c4c08cd5651a | [
"BSD-3-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import os
# Aggregate, per timestep, the number of cleared obstacles summed over every
# result file in directory "d", then plot the aggregated curve.
# Each result line has the form "<timestep> <count>".
# NOTE: 300 slots to match the 300-step horizon; the original np.zeros(250)
# raised IndexError for any run longer than 250 steps.
array = np.zeros(300)
fichiers = os.listdir("d")
for f in fichiers:
    print(f)
    with open("d/" + f, "r") as ins:
        for i, line in enumerate(ins):
            if i >= len(array):
                break  # ignore timesteps beyond the plotted horizon
            l = line.split(" ")
            array[i] += int(l[1])
print(array)
plt.plot(array)
plt.ylabel("Nombre d'obstacles nettoyés")
plt.xlabel('Temps')
plt.suptitle('Agent random')
plt.show()
| 18.970588 | 43 | 0.516279 | import matplotlib.pyplot as plt
import numpy as np
import os
# Aggregate, per timestep, the number of cleared obstacles summed over every
# result file in directory "d", then plot the aggregated curve.
# Each result line has the form "<timestep> <count>".
# NOTE: 300 slots to match the 300-step horizon; the original np.zeros(250)
# raised IndexError for any run longer than 250 steps.
array = np.zeros(300)
fichiers = os.listdir("d")
for f in fichiers:
    print(f)
    with open("d/" + f, "r") as ins:
        for i, line in enumerate(ins):
            if i >= len(array):
                break  # ignore timesteps beyond the plotted horizon
            l = line.split(" ")
            array[i] += int(l[1])
print(array)
plt.plot(array)
plt.ylabel("Nombre d'obstacles nettoyés")
plt.xlabel('Temps')
plt.suptitle('Agent random')
plt.show()
| true | true |
1c47577594847e925fd3f69b3081b42da3d8500b | 49,232 | py | Python | tests/test_data_tokenizers.py | sxjscience/gluon-nlp | e6c39a80f4155cdb9c5fe8145287ddd322b4952b | [
"Apache-2.0"
] | 1 | 2020-03-20T08:01:34.000Z | 2020-03-20T08:01:34.000Z | tests/test_data_tokenizers.py | sxjscience/gluon-nlp | e6c39a80f4155cdb9c5fe8145287ddd322b4952b | [
"Apache-2.0"
] | null | null | null | tests/test_data_tokenizers.py | sxjscience/gluon-nlp | e6c39a80f4155cdb9c5fe8145287ddd322b4952b | [
"Apache-2.0"
] | null | null | null | import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab
from gluonnlp.utils.misc import download
# English sentences used by the word-level tokenizer tests (the second one
# contains the non-ASCII name "Özil" to exercise unicode handling).
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
              'a new nation, conceived in Liberty, and dedicated to the proposition '
              'that all men are created equal.',
              'In spite of the debate going on for months about the photos of Özil with the '
              'Turkish President Recep Tayyip Erdogan, he regrets the return of '
              'the 92-match national player Özil.']
# German sentences with umlauts, ß and quoted speech.
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
              ' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
              ' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
              '"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
              'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
              'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
              ' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
# Chinese sentences for the Jieba tokenizer test (full-width punctuation).
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
              '参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
# Sentences for the subword tokenizer tests: punctuation runs, the roman
# numeral Ⅷ (NFKC-normalizes to "VIII") and emoji stress unicode edge cases.
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                        'GluonNLP is great!!!!!!',
                        "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
    """Re-join the words of ``sentence`` with 1-10 random whitespace characters.

    Used to check that whitespace-driven tokenizers ignore the amount and the
    kind of whitespace between words.
    """
    pieces = []
    words = sentence.split()
    last = len(words) - 1
    for idx, word in enumerate(words):
        pieces.append(word)
        if idx < last:
            # Same call sequence as before: one randint, then that many choices.
            for _ in range(random.randint(1, 10)):
                pieces.append(random.choice([' ', '\t', '\r', '\n']))
    return ''.join(pieces)
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
    """Check ``encode_with_offsets`` against ``encode`` for one sentence and a batch.

    When ``gt_offsets`` is None, each offset pair is validated by slicing the
    token back out of the original sentence; otherwise the offsets must match
    ``gt_offsets`` exactly (used when slices differ from tokens, e.g. byte-level).
    """
    if gt_offsets is None:
        for sentences in [all_sentences[0], all_sentences]:
            enc_tokens = tokenizer.encode(sentences, str)
            tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
            if isinstance(sentences, list):
                for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
                        zip(tokens, enc_tokens, offsets, sentences):
                    for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
                        # The offset span must reproduce the token verbatim.
                        assert ele_sentence[offset[0]:offset[1]] == tok
                        assert tok == enc_tok
            else:
                for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
                    assert sentences[offset[0]:offset[1]] == tok
                    assert tok == enc_tok
    else:
        for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
                                          (all_sentences, gt_offsets)]:
            enc_tokens = tokenizer.encode(sentences, str)
            tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
            assert ele_gt_offsets == offsets
            assert enc_tokens == tokens
def _check_spm_token(tokenizer, sentence, tok, offset, enc_tok):
    """Check a single sentencepiece token against its source span.

    Sentencepiece NFKC-normalizes its input before segmenting, so the sliced
    span is normalized (and stripped) before comparing; first subwords carry a
    leading '▁' marker that is not part of the source text.
    """
    assert tok == enc_tok
    ele_sel_tok = unicodedata.normalize('NFKC', sentence[offset[0]:offset[1]]).strip()
    if tokenizer.is_first_subword(tok):
        real_tok = tok[1:]
    else:
        real_tok = tok
    assert ele_sel_tok == real_tok,\
        'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
    """Offset check for sentencepiece tokenizers, for one sentence and a batch.

    Fix: the original only verified the batch (list) input; the single-sentence
    case silently did nothing. Both branches now run the same per-token checks.
    """
    for sentences in [all_sentences[0], all_sentences]:
        enc_tokens = tokenizer.encode(sentences, str)
        tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
        if isinstance(sentences, list):
            for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
                    in zip(tokens, enc_tokens, offsets, sentences):
                for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
                    _check_spm_token(tokenizer, ele_sentence, tok, offset, enc_tok)
        else:
            for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
                _check_spm_token(tokenizer, sentences, tok, offset, enc_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
    """Check that ``encode_with_offsets`` agrees with ``encode`` and that the
    offsets do not depend on the requested output type (int vs str)."""
    for case in (all_sentences[0], all_sentences):
        int_tokens, int_offsets = tokenizer.encode_with_offsets(case, int)
        _, str_offsets = tokenizer.encode_with_offsets(case, str)
        assert int_offsets == str_offsets
        assert int_tokens == tokenizer.encode(case, int)
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
    """Encode a single sentence and the whole batch and compare each result
    against its ground-truth token list."""
    cases = ((all_sentences[0], all_gt_tokens[0]),
             (all_sentences, all_gt_tokens))
    for sample, expected in cases:
        encoded = tokenizer.encode(sample)
        err_msg = 'Whole Encoded: {}, \nWhole GT: {}'.format(encoded, expected)
        assert encoded == expected, err_msg
def verify_decode(tokenizer, all_sentences, out_type=str):
    """Check that ``decode(encode(x))`` round-trips both a single sentence and
    the full batch."""
    for case in (all_sentences[0], all_sentences):
        encoded = tokenizer.encode(case, out_type)
        assert tokenizer.decode(encoded) == case
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
    """Check sentencepiece decoding for a single sentence and a batch.

    String round-trips must reproduce the NFKC-normalized (and, when
    ``tokenizer.lowercase`` is set, lowercased) input; int round-trips must
    match ``gt_int_decode_sentences``, where out-of-vocabulary characters have
    been replaced by the unknown marker.
    """
    for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
                                          (all_sentences, gt_int_decode_sentences)]:
        if isinstance(sentences, str):
            gt_str_decode_sentences = sentences
            if tokenizer.lowercase:
                gt_str_decode_sentences = gt_str_decode_sentences.lower()
            # Sentencepiece normalizes its input with NFKC before segmenting.
            gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
        elif isinstance(sentences, list):
            gt_str_decode_sentences = []
            for ele in sentences:
                ele_gt_decode = ele
                if tokenizer.lowercase:
                    ele_gt_decode = ele_gt_decode.lower()
                ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
                gt_str_decode_sentences.append(ele_gt_decode)
        else:
            raise NotImplementedError
        assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
        assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
    """Round-trip through str and int tokens and compare each decode with its
    expected ground truth, for a single sentence and for the batch."""
    single = (all_sentences[0], gt_int_decode[0], gt_str_decode[0])
    batch = (all_sentences, gt_int_decode, gt_str_decode)
    for sample, expected_int, expected_str in (single, batch):
        assert tokenizer.decode(tokenizer.encode(sample, str)) == expected_str
        assert tokenizer.decode(tokenizer.encode(sample, int)) == expected_int
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
    """Check HuggingFace tokenizer decoding and tokenizer/vocab consistency.

    Both the str and the int round-trips must decode to the same ground truth,
    and ``tokenizer.vocab`` must map between the two token representations.
    """
    for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
                                      (all_sentences, gt_decode_sentences)]:
        assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
        assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
        if isinstance(sentences, list):
            for sentence in sentences:
                # vocab.to_tokens(ids) and vocab[tokens] must be inverses of the
                # tokenizer's own int/str encodings.
                assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
                    == tokenizer.encode(sentence, str)
                assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
                    == tokenizer.encode(sentence, int)
        else:
            assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
                == tokenizer.encode(sentences, str)
            assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
                == tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
    # When no vocab is attached, encoding to ids and decoding from ids (flat or
    # nested) should all raise ValueError.
    for sentences in [EN_SAMPLES[0], EN_SAMPLES]:
        with pytest.raises(ValueError):
            tokenizer.encode(sentences, int)
    with pytest.raises(ValueError):
        tokenizer.decode([0])
    with pytest.raises(ValueError):
        tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
    """Round-trip ``tokenizer`` through pickle and check that the clone has the
    expected type and encodes the subword samples identically."""
    print(tokenizer)
    clone = pickle.loads(pickle.dumps(tokenizer))
    assert isinstance(clone, cls)
    original_out = tokenizer.encode(SUBWORD_TEST_SAMPLES, str)
    assert original_out == clone.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
    """WhitespaceTokenizer: encode must be insensitive to the amount/kind of
    whitespace, decode must round-trip, and int output needs an attached vocab."""
    tokenizer = WhitespaceTokenizer()
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
                        'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
                        'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
                        'all', 'men', 'are', 'created', 'equal.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
                        'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
                        'kaiserlicher', 'Rat.'],
                       ['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
                        'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
                        'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
                        'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
                        'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
                        'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
    for _ in range(2):
        # Inject noise and test for encode
        noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
        noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
        verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
                            gt_en_tokenized + gt_de_tokenized)
        # Test for decode
        verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
        # Test for encode_with_offsets
        verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
    verify_decode_no_vocab_raise(tokenizer)
    # Test for output_type = int
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
                                          [])))
    tokenizer.set_vocab(vocab)
    verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
    verify_pickleble(tokenizer, WhitespaceTokenizer)
    verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
    """MosesTokenizer (en/de): encode splits punctuation into separate tokens;
    decode must round-trip; int output requires an attached vocab."""
    en_tokenizer = MosesTokenizer('en')
    de_tokenizer = MosesTokenizer('de')
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
                        'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
                        ',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
                        'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
                        'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
                        'kaiserlicher', 'Rat', '.'],
                       ['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
                        'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
                        'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
                        'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
                        'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
                        'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
                        '"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
    verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
    verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
    verify_decode(en_tokenizer, EN_SAMPLES, str)
    verify_decode(de_tokenizer, DE_SAMPLES, str)
    # Both languages share one vocab built from the union of the GT tokens.
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
    verify_decode_no_vocab_raise(en_tokenizer)
    verify_decode_no_vocab_raise(de_tokenizer)
    en_tokenizer.set_vocab(vocab)
    de_tokenizer.set_vocab(vocab)
    verify_decode(en_tokenizer, EN_SAMPLES, int)
    verify_decode(de_tokenizer, DE_SAMPLES, int)
    verify_pickleble(en_tokenizer, MosesTokenizer)
    verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
    """JiebaTokenizer: Chinese word segmentation, decode round-trip, and int
    output after attaching a vocab."""
    tokenizer = JiebaTokenizer()
    gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
                        '会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
                        '猛士', ',', '将', '更奋', '然而', '前行', '。'],
                       ['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
                        '及', '测量', '技术', '专业', '毕业', '。']]
    verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
    verify_decode(tokenizer, ZH_SAMPLES, str)
    vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
    verify_decode_no_vocab_raise(tokenizer)
    tokenizer.set_vocab(vocab)
    verify_decode(tokenizer, ZH_SAMPLES, int)
    verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
    """SpacyTokenizer (en/de): encode, pickling, offsets, and loading a tokenizer
    through the explicit ``model`` keyword."""
    en_tokenizer = SpacyTokenizer('en')
    de_tokenizer = SpacyTokenizer('de')
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
                        'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
                        ',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
                        'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
                        ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
                        'Rat', '.'],
                       ['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
                        'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
                        'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
                        'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
                        ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
                        ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
                        'der', '"', 'Bild-Zeitung', '.']]
    verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
    verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
    en_tokenizer.set_vocab(vocab)
    de_tokenizer.set_vocab(vocab)
    verify_pickleble(en_tokenizer, SpacyTokenizer)
    verify_pickleble(de_tokenizer, SpacyTokenizer)
    verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
    verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
    # Test for loading spacy tokenizer from specifying the "model" flag
    en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
    out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
    """YTTMTokenizer: download a small pretrained BPE model and verify encode,
    offsets, pickling, and both str/int decode (OOV chars become <UNK>)."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'yttm.model')
        download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
                 path=model_path)
        tokenizer = YTTMTokenizer(model_path=model_path)
        gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
                         'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
                         'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '▁', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
                       (13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
                       (27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
                       (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
                       (62, 63), (63, 65), (65, 66), (66, 67)]]
        gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
        gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                         'GluonNLP is great!!!!!!',
                         "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, YTTMTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        # Begin to verify decode
        for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
                                                                       (SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
            int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
            str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
            assert int_decode == ele_gt_int_decode
            assert str_decode == ele_gt_str_decode
        os.remove(model_path)
        # Decoding empty inputs yields empty outputs of the matching shape.
        assert tokenizer.decode([]) == ''
        assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
    """SentencepieceTokenizer: encode/offsets/decode against a small pretrained
    model, the lowercase option, and subword regularization sampling."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'spm.model')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
                 path=model_path)
        # Case1
        tokenizer = SentencepieceTokenizer(model_path)
        gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
                         '▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
                         'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
                         '#', '▁', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
                       (12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
                       (24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
                       (37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
                       (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
                       (59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
        gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, SentencepieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
        # Case2, lower_case
        gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
                                    'gluonnlp is great!!!!!!',
                                    'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
        # Case3, Use the sentencepiece regularization commands, we test whether we can obtain different encoding results
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
        has_different_encode_out = False
        encode_out = None
        for _ in range(10):
            if encode_out is None:
                encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
            else:
                ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
                if ele_out != encode_out:
                    has_different_encode_out = True
                    break
        assert has_different_encode_out
        os.remove(model_path)
def test_subword_nmt_tokenizer():
    """SubwordNMTTokenizer: encode/offsets/decode with a pretrained BPE model
    ('</w>' marks end-of-word), plus BPE-dropout round-trip."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'subword_nmt.model')
        download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
                 path=vocab_path)
        # Case 1
        tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
        gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
                         "Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
                        ["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
                         "!</w>"],
                        ["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
                         "on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
                         "an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
                       (16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
                       (31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
                       (44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
                       (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
                         "GluonNLP is great!!!!!!",
                         "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# \'abc\'"]
        gt_str_decode = SUBWORD_TEST_SAMPLES
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, SubwordNMTTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
        # Case 2, bpe_dropout
        # We use str decode here because we may not perfectly recover the original sentence with int decode.
        tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
        verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
        os.remove(model_path)
        os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
    """HuggingFaceBPETokenizer: encode/offsets/decode with both the GluonNLP
    vocab format and the original HF vocab format; HF decode drops <unk>."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'test_hf_bpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
        gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
                         'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
                         '!</w>', '!</w>', '!</w>', '!</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
                         'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
                         'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
                         '.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
                         "'</w>", 'ab', 'c</w>', "'</w>"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
                       (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
                       (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
                       (17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
                       (48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # gt_int_decode = gt_str_decode for hf
        # hf removed the unk tokens in decode result
        gt_decode = ["Hello , y ' all ! How are you ?",
                     'GluonNLP is great ! ! ! ! ! !',
                     "GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @ # ' abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        gt_lowercase_decode = ["hello , y ' all ! how are you ?",
                               'gluonnlp is great ! ! ! ! ! !',
                               "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
    """End-to-end check of HuggingFaceByteBPETokenizer against pinned ground truth.

    Downloads a small byte-level BPE model and two vocab files (gluonnlp-format
    and original HuggingFace-format), then verifies:
      * Case 1: encode (str tokens), pickle round-trip, character offsets, and
        decode with the default ``lowercase=False``;
      * Case 2: decode only, with ``lowercase=True`` (lowercased ground truth);
      * Case 3: same checks as Case 1 but loading the original HF vocab file.
    Requires network access to fetch the test models.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bytebpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        # Byte-level BPE represents spaces/bytes with special glyphs (e.g. 'Ġ'),
        # so the expected tokens below intentionally contain non-ASCII bytes.
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
        gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
                        ['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
                         'ģ', 'ï¼', 'ģ', '!!!'],
                        ['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
                         '-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
                         'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
                         '#', "Ġ'", 'ab', 'c', "'"]]
        # NOTE: the definition of byte-level offsets is not entirely clear
        # (several tokens map to overlapping character ranges); these values
        # simply pin the current behavior.
        gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
                       (30, 31), (31, 33), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
                       (17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
                       (16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
                       (30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
                       (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
                       (63, 65), (65, 66), (66, 67)]]
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
                                   'gluonnlp is great!!!!!!',
                                   "gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@# 'abc'"]
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Explicit cleanup; the TemporaryDirectory context would remove these
        # files anyway when it exits.
        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
    """End-to-end check of HuggingFaceWordPieceTokenizer against pinned ground truth.

    Downloads a WordPiece vocab in both gluonnlp and original HuggingFace
    formats, then verifies:
      * Case 1: encode (str tokens), pickle round-trip, character offsets, and
        decode with ``lowercase=True`` — out-of-vocab symbols become ``<unk>``;
      * Case 2: decode only, with ``lowercase=False``;
      * Case 3: same checks as Case 1 but loading the original HF vocab file.
    Requires network access to fetch the test vocab files.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, lowercase=True
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
        gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
                         "<unk>", "<unk>", "<unk>", "<unk>", "?"],
                        ["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
                         "\uff01", "\uff01", "!", "!", "!"],
                        ["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
                         "##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
                         "xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
                         "@", "#", "'", "abc", "'"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
                       (19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
                       (40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
                       (62, 63), (63, 66), (66, 67)]]
        # NOTE(review): HF decode drops <unk> tokens, so the emoji/Ⅷ symbols
        # vanish from the decoded strings below.
        gt_decode = ["hello, y'all! how are you?",
                     "gluonnlp is great ! ! !!!!",
                     "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @ #'abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=False
        # presumably the vocab only contains lowercase entries, so capitalized
        # words map to <unk> and disappear from the decode — TODO confirm
        gt_lowercase_decode = [", y'all! are you?",
                               "is great ! ! !!!!",
                               "- - - - - -..... / :! @ #'abc '"]
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Explicit cleanup; the TemporaryDirectory context would remove these
        # files anyway when it exits.
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
    """Check HuggingFaceTokenizer with a WordPiece model serialized by tokenizers 0.8.x.

    Skipped when the installed ``tokenizers`` package is >= 0.9 (the 0.8.x
    serialization format is not forward-compatible). Downloads a model +
    vocab pair and verifies encode, pickle round-trip, character offsets,
    and decode against pinned ground truth. Requires network access.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
                 path=model_path,
                 sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
        vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
                 path=vocab_path,
                 sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # This vocab is case-sensitive; unknown symbols surface as '[UNK]'.
        gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
                         'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
                         '[UNK]', '[UNK]', '!', '!', '!'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', '-',
                         'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
                         '-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
                         '##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
                       (14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
                       (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
                       (9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
                       (21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
                       (34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
                       (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
                       (66, 67)]]
        # '[UNK]' tokens are dropped in the decoded output.
        gt_decode = ['Hello, y all! How are you?',
                     'GluonNLP is great!!!',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
                     ':! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
    """Check HuggingFaceTokenizer with a BPE model serialized by tokenizers 0.8.x.

    Skipped when the installed ``tokenizers`` package is >= 0.9 (the 0.8.x
    serialization format is not forward-compatible). Downloads a model +
    vocab pair and verifies encode, pickle round-trip, character offsets,
    and decode against pinned ground truth. Requires network access.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
                 path=model_path,
                 sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
        vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
                 path=vocab_path,
                 sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # BPE word-final subwords carry a '</w>' suffix; out-of-vocab symbols
        # are simply absent from the token stream (note the offset gaps below).
        gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
                         'How</w>', 'are</w>', 'you</w>', '?</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
                         '!</w>', '!</w>', '!</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
                         '-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
                         'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
                         'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
                         ':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
        gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
                       (18, 21), (22, 25), (34, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
                       (15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
                       (30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
                       (60, 61), (63, 65), (65, 66)]]
        gt_decode = ['Hello , y all ! How are you ?',
                     'GluonNLP is great ! ! !',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
                     ' . . . . . / : ! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
    """Check HuggingFaceTokenizer with a byte-level BPE model from tokenizers 0.8.x.

    Skipped when the installed ``tokenizers`` package is >= 0.9 (the 0.8.x
    serialization format is not forward-compatible). Downloads a model +
    vocab pair and verifies encode, pickle round-trip, character offsets,
    and decode against pinned ground truth. Requires network access.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
                 path=model_path,
                 sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
        vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
                 path=vocab_path,
                 sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # Byte-level BPE encodes raw bytes as special glyphs (e.g. 'Ġ' for a
        # leading space), so multi-byte characters split into several tokens
        # whose offsets overlap on the same source character.
        gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
                         'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
                         'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
                         'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
                         'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
                         '..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
                       (28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
                       (30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
                       (17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
                       (19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
                       (11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
                       (23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
                       (44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
                       (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
                       (63, 65), (65, 66), (66, 67)]]
        # Byte-level decode is lossless, so the decoded strings match the
        # original samples exactly.
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
    """Smoke-test the tokenizer registry: build a Moses tokenizer by name and encode."""
    moses_en = gluonnlp.data.tokenizers.create('moses', 'en')
    moses_en.encode('hello world!')
| 62.318987 | 133 | 0.50518 | import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab
from gluonnlp.utils.misc import download
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
'a new nation, conceived in Liberty, and dedicated to the proposition '
'that all men are created equal.',
'In spite of the debate going on for months about the photos of Özil with the '
'Turkish President Recep Tayyip Erdogan, he regrets the return of '
'the 92-match national player Özil.']
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
'"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
'参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
words = sentence.split()
ret = ''
for i, word in enumerate(words):
ret += word
if i < len(words) - 1:
n_space_tokens = random.randint(1, 10)
for j in range(n_space_tokens):
ret += random.choice([' ', '\t', '\r', '\n'])
return ret
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
if gt_offsets is None:
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
zip(tokens, enc_tokens, offsets, sentences):
for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
assert ele_sentence[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
assert sentences[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
(all_sentences, gt_offsets)]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
assert ele_gt_offsets == offsets
assert enc_tokens == tokens
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
in zip(tokens, enc_tokens, offsets, sentences):
for i, (tok, offset, enc_tok) in enumerate(zip(ele_tokens, ele_offsets,
ele_enc_tokens)):
assert tok == enc_tok
ele_sel_tok = unicodedata.normalize('NFKC',
ele_sentence[offset[0]:offset[1]]).strip()
if tokenizer.is_first_subword(tok):
real_tok = tok[1:]
else:
real_tok = tok
assert ele_sel_tok == real_tok,\
'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, int)
tokens, offsets = tokenizer.encode_with_offsets(sentences, int)
str_tokens, str_offsets = tokenizer.encode_with_offsets(sentences, str)
assert offsets == str_offsets
assert tokens == enc_tokens
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
for sentences, gt_tokens in [(all_sentences[0], all_gt_tokens[0]),
(all_sentences, all_gt_tokens)]:
tokenizer_encode_ret = tokenizer.encode(sentences)
assert tokenizer_encode_ret == gt_tokens,\
'Whole Encoded: {}, \nWhole GT: {}'.format(tokenizer_encode_ret, gt_tokens)
def verify_decode(tokenizer, all_sentences, out_type=str):
for sentences in [all_sentences[0], all_sentences]:
assert tokenizer.decode(tokenizer.encode(sentences, out_type)) == sentences
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
(all_sentences, gt_int_decode_sentences)]:
if isinstance(sentences, str):
gt_str_decode_sentences = sentences
if tokenizer.lowercase:
gt_str_decode_sentences = gt_str_decode_sentences.lower()
gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
elif isinstance(sentences, list):
gt_str_decode_sentences = []
for ele in sentences:
ele_gt_decode = ele
if tokenizer.lowercase:
ele_gt_decode = ele_gt_decode.lower()
ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
gt_str_decode_sentences.append(ele_gt_decode)
else:
raise NotImplementedError
assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
for sentences, case_gt_int_decode, case_gt_str_decode in [(all_sentences[0], gt_int_decode[0], gt_str_decode[0]),
(all_sentences, gt_int_decode, gt_str_decode)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_str_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
(all_sentences, gt_decode_sentences)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
if isinstance(sentences, list):
for sentence in sentences:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
== tokenizer.encode(sentence, str)
assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
== tokenizer.encode(sentence, int)
else:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
== tokenizer.encode(sentences, str)
assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
== tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
# When the vocab is not attached, should raise ValueError
for sentences in [EN_SAMPLES[0], EN_SAMPLES]:
with pytest.raises(ValueError):
tokenizer.encode(sentences, int)
with pytest.raises(ValueError):
tokenizer.decode([0])
with pytest.raises(ValueError):
tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
print(tokenizer)
# Verify if the tokenizer is pickleable and has the same behavior after dumping/loading
tokenizer_p = pickle.loads(pickle.dumps(tokenizer))
assert isinstance(tokenizer_p, cls)
assert tokenizer.encode(SUBWORD_TEST_SAMPLES, str) == tokenizer_p.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
tokenizer = WhitespaceTokenizer()
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
'all', 'men', 'are', 'created', 'equal.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat.'],
['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
for _ in range(2):
# Inject noise and test for encode
noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
gt_en_tokenized + gt_de_tokenized)
# Test for decode
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
# Test for encode_with_offsets
verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
verify_decode_no_vocab_raise(tokenizer)
# Test for output_type = int
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
[])))
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
verify_pickleble(tokenizer, WhitespaceTokenizer)
verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
en_tokenizer = MosesTokenizer('en')
de_tokenizer = MosesTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
'"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
verify_decode(en_tokenizer, EN_SAMPLES, str)
verify_decode(de_tokenizer, DE_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
verify_decode_no_vocab_raise(en_tokenizer)
verify_decode_no_vocab_raise(de_tokenizer)
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_decode(en_tokenizer, EN_SAMPLES, int)
verify_decode(de_tokenizer, DE_SAMPLES, int)
verify_pickleble(en_tokenizer, MosesTokenizer)
verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
tokenizer = JiebaTokenizer()
gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
'会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
'猛士', ',', '将', '更奋', '然而', '前行', '。'],
['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
'及', '测量', '技术', '专业', '毕业', '。']]
verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
verify_decode(tokenizer, ZH_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
verify_decode_no_vocab_raise(tokenizer)
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, ZH_SAMPLES, int)
verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
en_tokenizer = SpacyTokenizer('en')
de_tokenizer = SpacyTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_pickleble(en_tokenizer, SpacyTokenizer)
verify_pickleble(de_tokenizer, SpacyTokenizer)
verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
# Test for loading spacy tokenizer from specifying the "model" flag
en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'yttm.model')
download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
path=model_path)
tokenizer = YTTMTokenizer(model_path=model_path)
gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
(13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
(27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
(54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
(62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, YTTMTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
# Begin to verify decode
for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
(SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
assert int_decode == ele_gt_int_decode
assert str_decode == ele_gt_str_decode
os.remove(model_path)
assert tokenizer.decode([]) == ''
assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'spm.model')
download(url=get_repo_url()
+ 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
path=model_path)
# Case1
tokenizer = SentencepieceTokenizer(model_path)
gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
'▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
'#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
(12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
(24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
(37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
(51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
# Case2, lower_case
gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
'gluonnlp is great!!!!!!',
'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
# Case3, Use the sentencepiece regularization commands, we test whether we can obtain different encoding results
tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
has_different_encode_out = False
encode_out = None
for _ in range(10):
if encode_out is None:
encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
else:
ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
if ele_out != encode_out:
has_different_encode_out = True
break
assert has_different_encode_out
os.remove(model_path)
def test_subword_nmt_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'subword_nmt.model')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
path=vocab_path)
# Case 1
tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
"Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
"!</w>"],
["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
"on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
"an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
(16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
"GluonNLP is great!!!!!!",
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@
gt_str_decode = SUBWORD_TEST_SAMPLES
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SubwordNMTTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
# Case 2, bpe_dropout
# We use str decode here because we may not perfectly recover the original sentence with int decode.
tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
os.remove(model_path)
os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'test_hf_bpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
'!</w>', '!</w>', '!</w>', '!</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
'.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
"'</w>", 'ab', 'c</w>', "'</w>"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
(26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
(19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
(17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
(48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
# gt_int_decode = gt_str_decode for hf
# hf removed the unk tokens in decode result
gt_decode = ["Hello , y ' all ! How are you ?",
'GluonNLP is great ! ! ! ! ! !',
"GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_decode = ["hello , y ' all ! how are you ?",
'gluonnlp is great ! ! ! ! ! !',
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
'ģ', 'ï¼', 'ģ', '!!!'],
['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
'-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
'#', "Ġ'", 'ab', 'c', "'"]]
# the defination of the offsets of bytelevel seems not clear
gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
(30, 31), (31, 33), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
(17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
(16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
(30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
(56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
'gluonnlp is great!!!!!!',
"gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
path=hf_vocab_path)
# Case 1, lowercase=True
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
"<unk>", "<unk>", "<unk>", "<unk>", "?"],
["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
"\uff01", "\uff01", "!", "!", "!"],
["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
"##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
"xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
"@", "#", "'", "abc", "'"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
(19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
(40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
(62, 63), (63, 66), (66, 67)]]
gt_decode = ["hello, y'all! how are you?",
"gluonnlp is great ! ! !!!!",
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=False
gt_lowercase_decode = [", y'all! are you?",
"is great ! ! !!!!",
"- - - - - -..... / :! @ #'abc '"]
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(vocab_path)
os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
path=model_path,
sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
path=vocab_path,
sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
'[UNK]', '[UNK]', '!', '!', '!'],
['Gl', '##u', '##on', '##N', '##L', '##P', '-',
'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
'-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
'##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
(14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
(32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
(9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
(21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
(34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
(52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
(66, 67)]]
gt_decode = ['Hello, y all! How are you?',
'GluonNLP is great!!!',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
':! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
path=model_path,
sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
path=vocab_path,
sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
'How</w>', 'are</w>', 'you</w>', '?</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
'!</w>', '!</w>', '!</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
'-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
(18, 21), (22, 25), (34, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
(15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
(30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
(60, 61), (63, 65), (65, 66)]]
gt_decode = ['Hello , y all ! How are you ?',
'GluonNLP is great ! ! !',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
' . . . . . / : ! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
path=model_path,
sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
path=vocab_path,
sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
'..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
(28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
(30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
(17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
(19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
(11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
(23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
(57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
tokenizer = gluonnlp.data.tokenizers.create('moses', 'en')
tokenizer.encode('hello world!')
| true | true |
1c475796efa58d436a4aeaa031170fd8364ddc7a | 256 | py | Python | 09/01/01/5.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | 09/01/01/5.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | 46 | 2017-06-30T22:19:07.000Z | 2017-07-31T22:51:31.000Z | 10/01/01/5.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | class Base1:
def __init__(self): print('Base1.__init__');
class Base2:
def __init__(self): print('Base2.__init__');
class Super(Base1, Base2):
def __init__(self): print('Super.__init__'); Base1.__init__(self); Base2.__init__(self)
c = Super()
| 28.444444 | 91 | 0.699219 | class Base1:
def __init__(self): print('Base1.__init__');
class Base2:
def __init__(self): print('Base2.__init__');
class Super(Base1, Base2):
def __init__(self): print('Super.__init__'); Base1.__init__(self); Base2.__init__(self)
c = Super()
| true | true |
1c4757eb287bb3f279ee1609d1ef569abd806f07 | 156 | py | Python | tests/models.py | rtidatascience/django-postgres-power | cf3f714ab9d8919187dc478f1d0679945017ae17 | [
"BSD-3-Clause"
] | 16 | 2015-12-10T06:37:49.000Z | 2021-07-16T00:02:41.000Z | tests/models.py | rtidatascience/django-postgres-power | cf3f714ab9d8919187dc478f1d0679945017ae17 | [
"BSD-3-Clause"
] | 4 | 2016-08-23T13:31:33.000Z | 2019-04-08T15:47:38.000Z | tests/models.py | rtidatascience/django-postgres-power | cf3f714ab9d8919187dc478f1d0679945017ae17 | [
"BSD-3-Clause"
] | 7 | 2016-08-23T12:57:55.000Z | 2020-11-14T21:08:53.000Z | from django.db import models
class Checkin(models.Model):
logged_at = models.DateTimeField()
class Number(models.Model):
n = models.IntegerField() | 22.285714 | 38 | 0.74359 | from django.db import models
class Checkin(models.Model):
logged_at = models.DateTimeField()
class Number(models.Model):
n = models.IntegerField() | true | true |
1c475960ea7c505c741557bae2f651bd3511c226 | 2,710 | py | Python | cimcb_lite/utils/table_check.py | RuibingS/cimcb | 382f7d8fff30d3d276f18ac8c7dc686e0e643fa9 | [
"MIT"
] | 3 | 2019-05-19T10:36:50.000Z | 2020-10-12T08:13:04.000Z | cimcb_lite/utils/table_check.py | RuibingS/cimcb | 382f7d8fff30d3d276f18ac8c7dc686e0e643fa9 | [
"MIT"
] | 1 | 2019-03-24T11:04:39.000Z | 2019-03-26T03:54:51.000Z | cimcb_lite/utils/table_check.py | RuibingS/cimcb | 382f7d8fff30d3d276f18ac8c7dc686e0e643fa9 | [
"MIT"
] | 3 | 2019-05-19T10:37:03.000Z | 2020-10-12T08:13:05.000Z | import numpy as np
def table_check(DataTable, PeakTable, print_statement=True):
"""Error checking for DataTable and PeakTable (used in load_dataXL).
Parameters
----------
DataTable: DataFrame
Data sheet with the required columns.
PeakTable: DataFrame
Peak sheet with the required columns.
print_statement: boolean (default True)
If the error checks are successful and print_statement is True, the following is printed: "Data Table & Peak Table is suitable."
"""
# Check DataTable for Idx, Class and SampleID
data_columns = DataTable.columns.values
if "Idx" not in data_columns:
raise ValueError("Data Table does not contain the required 'Idx' column")
if DataTable.Idx.isnull().values.any() == True:
raise ValueError("Data Table Idx column cannot contain missing values")
if len(np.unique(DataTable.Idx)) != len(DataTable.Idx):
raise ValueError("Data Table Idx numbers are not unique. Please change")
if "Class" not in data_columns:
raise ValueError("Data Table does not contain the required 'Class' column")
if "SampleID" not in data_columns:
raise ValueError("Data Table does not contain the required 'SampleID' column")
# Check PeakTable for Idx, Name, Label
peak_columns = PeakTable.columns.values
if "Idx" not in peak_columns:
raise ValueError("Peak Table does not contain the required 'Idx' column")
if PeakTable.Idx.isnull().values.any() == True:
raise ValueError("Peak Table Idx column cannot contain missing values")
if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
raise ValueError("Peak Table Idx numbers are not unique. Please change")
if "Name" not in peak_columns:
raise ValueError("Peak Table does not contain the required 'Name' column")
if PeakTable.Idx.isnull().values.any() == True:
raise ValueError("Peak Table Name column cannot contain missing values")
if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
raise ValueError("Peak Table Name numbers are not unique. Please change")
if "Label" not in peak_columns:
raise ValueError("Data Table does not contain the required 'Label' column")
# Check that Peak Names in PeakTable & DataTable match
peak_list = PeakTable.Name
data_columns = DataTable.columns.values
temp = np.intersect1d(data_columns, peak_list)
if len(temp) != len(peak_list):
raise ValueError("The Peak Names in Data Table should exactly match the Peak Names in Peak Table. Remember that all Peak Names should be unique.")
if print_statement is True:
print("Data Table & Peak Table is suitable.")
| 41.692308 | 154 | 0.700738 | import numpy as np
def table_check(DataTable, PeakTable, print_statement=True):
data_columns = DataTable.columns.values
if "Idx" not in data_columns:
raise ValueError("Data Table does not contain the required 'Idx' column")
if DataTable.Idx.isnull().values.any() == True:
raise ValueError("Data Table Idx column cannot contain missing values")
if len(np.unique(DataTable.Idx)) != len(DataTable.Idx):
raise ValueError("Data Table Idx numbers are not unique. Please change")
if "Class" not in data_columns:
raise ValueError("Data Table does not contain the required 'Class' column")
if "SampleID" not in data_columns:
raise ValueError("Data Table does not contain the required 'SampleID' column")
peak_columns = PeakTable.columns.values
if "Idx" not in peak_columns:
raise ValueError("Peak Table does not contain the required 'Idx' column")
if PeakTable.Idx.isnull().values.any() == True:
raise ValueError("Peak Table Idx column cannot contain missing values")
if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
raise ValueError("Peak Table Idx numbers are not unique. Please change")
if "Name" not in peak_columns:
raise ValueError("Peak Table does not contain the required 'Name' column")
if PeakTable.Idx.isnull().values.any() == True:
raise ValueError("Peak Table Name column cannot contain missing values")
if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
raise ValueError("Peak Table Name numbers are not unique. Please change")
if "Label" not in peak_columns:
raise ValueError("Data Table does not contain the required 'Label' column")
peak_list = PeakTable.Name
data_columns = DataTable.columns.values
temp = np.intersect1d(data_columns, peak_list)
if len(temp) != len(peak_list):
raise ValueError("The Peak Names in Data Table should exactly match the Peak Names in Peak Table. Remember that all Peak Names should be unique.")
if print_statement is True:
print("Data Table & Peak Table is suitable.")
| true | true |
1c475968ebbd39e752c755cb7b4598bf947a6220 | 556 | py | Python | src/log.py | ENDERZOMBI102/endc-lang | 554c540111adae52c3ec23c75474d2121d339df4 | [
"MIT"
] | null | null | null | src/log.py | ENDERZOMBI102/endc-lang | 554c540111adae52c3ec23c75474d2121d339df4 | [
"MIT"
] | null | null | null | src/log.py | ENDERZOMBI102/endc-lang | 554c540111adae52c3ec23c75474d2121d339df4 | [
"MIT"
] | null | null | null | import sys
from typing import TextIO
from cli import args
def _log(level: int, msg: str, file: TextIO) -> None:
if args.verboseLevel <= level:
print(msg, file=file)
def debug(msg: str, file: TextIO = sys.stdout) -> None:
if args.debug:
_log( 0, f'[DEBUG] {msg}', file )
def info(msg: str, file: TextIO = sys.stdout) -> None:
_log( 1, f'[INFO] {msg}', file )
def warn(msg: str, file: TextIO = sys.stderr) -> None:
_log( 2, f'[WARN] {msg}', file )
def error(msg: str, file: TextIO = sys.stderr) -> None:
_log( 3, f'[ERROR] {msg}', file )
| 20.592593 | 55 | 0.627698 | import sys
from typing import TextIO
from cli import args
def _log(level: int, msg: str, file: TextIO) -> None:
if args.verboseLevel <= level:
print(msg, file=file)
def debug(msg: str, file: TextIO = sys.stdout) -> None:
if args.debug:
_log( 0, f'[DEBUG] {msg}', file )
def info(msg: str, file: TextIO = sys.stdout) -> None:
_log( 1, f'[INFO] {msg}', file )
def warn(msg: str, file: TextIO = sys.stderr) -> None:
_log( 2, f'[WARN] {msg}', file )
def error(msg: str, file: TextIO = sys.stderr) -> None:
_log( 3, f'[ERROR] {msg}', file )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.