import logging
from six.moves.urllib.parse import urljoin
from xml.etree import ElementTree
import recurly
import recurly.js as js
from recurly.errors import *
from recurly.resource import Resource, Money, PageError
"""
Recurly's Python client library is an interface to its REST API.
Please see the Recurly API documentation for more information:
https://dev.recurly.com/docs/getting-started
"""
__version__ = '2.2.15'
BASE_URI = 'https://%s.recurly.com/v2/'
"""The API endpoint to send requests to."""
SUBDOMAIN = 'api'
"""The subdomain of the site authenticating API requests."""
API_KEY = None
"""The API key to use when authenticating API requests."""
API_VERSION = '2.1'
"""The API version to use when making API requests."""
CA_CERTS_FILE = None
"""A file contianing a set of concatenated certificate authority certs
for validating the server against."""
DEFAULT_CURRENCY = 'USD'
"""The currency to use creating `Money` instances when one is not specified."""
SOCKET_TIMEOUT_SECONDS = None
"""The number of seconds after which to timeout requests to the Recurly API.
If unspecified, the global default timeout is used."""
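# A minimal configuration sketch for the settings above (the subdomain and key
# shown are placeholders, not real credentials):
#
#     import recurly
#     recurly.SUBDOMAIN = 'your-subdomain'
#     recurly.API_KEY = 'your-private-api-key'
#     recurly.DEFAULT_CURRENCY = 'USD'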
def base_uri():
if SUBDOMAIN is None:
raise ValueError('recurly.SUBDOMAIN not set')
return BASE_URI % SUBDOMAIN
def api_version():
return API_VERSION
class Address(Resource):
nodename = 'address'
attributes = (
'address1',
'address2',
'city',
'state',
'zip',
'country',
'phone',
)
class Account(Resource):
"""A customer account."""
member_path = 'accounts/%s'
collection_path = 'accounts'
nodename = 'account'
attributes = (
'account_code',
'username',
'email',
'first_name',
'last_name',
'company_name',
'vat_number',
'tax_exempt',
'entity_use_code',
'accept_language',
'created_at',
)
_classes_for_nodename = {'address': Address}
sensitive_attributes = ('number', 'verification_value',)
def to_element(self):
elem = super(Account, self).to_element()
# Make sure the account code is always included in a serialization.
if 'account_code' not in self.__dict__: # not already included
try:
account_code = self.account_code
except AttributeError:
pass
else:
elem.append(self.element_for_value('account_code', account_code))
if 'billing_info' in self.__dict__:
elem.append(self.billing_info.to_element())
if 'address' in self.__dict__:
elem.append(self.address.to_element())
return elem
@classmethod
def all_active(cls, **kwargs):
"""Return a `Page` of active customer accounts.
This is a convenience method for `Account.all(state='active')`.
"""
return cls.all(state='active', **kwargs)
@classmethod
def all_closed(cls, **kwargs):
"""Return a `Page` of closed customer accounts.
This is a convenience method for `Account.all(state='closed')`.
"""
return cls.all(state='closed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due customer accounts.
This is a convenience method for `Account.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
@classmethod
def all_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are subscribers.
This is a convenience method for `Account.all(state='subscriber')`.
"""
return cls.all(state='subscriber', **kwargs)
@classmethod
def all_non_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are not subscribers.
This is a convenience method for `Account.all(state='non_subscriber')`.
"""
return cls.all(state='non_subscriber', **kwargs)
def __getattr__(self, name):
if name == 'billing_info':
try:
billing_info_url = self._elem.find('billing_info').attrib['href']
except (AttributeError, KeyError):
raise AttributeError(name)
resp, elem = BillingInfo.element_for_url(billing_info_url)
return BillingInfo.from_element(elem)
try:
return super(Account, self).__getattr__(name)
except AttributeError:
if name == 'address':
self.address = Address()
return self.address
else:
raise AttributeError(name)
def charge(self, charge):
"""Charge (or credit) this account with the given `Adjustment`."""
url = urljoin(self._url, '%s/adjustments' % self.account_code)
return charge.post(url)
def invoice(self, **kwargs):
"""Create an invoice for any outstanding adjustments this account has."""
url = urljoin(self._url, '%s/invoices' % self.account_code)
if kwargs:
response = self.http_request(url, 'POST', Invoice(**kwargs), {'Content-Type':
'application/xml; charset=utf-8'})
else:
response = self.http_request(url, 'POST')
if response.status != 201:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
invoice = Invoice.from_element(elem)
invoice._url = response.getheader('Location')
return invoice
def build_invoice(self):
"""Preview an invoice for any outstanding adjustments this account has."""
url = urljoin(self._url, '%s/invoices/preview' % self.account_code)
response = self.http_request(url, 'POST')
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
invoice = Invoice.from_element(elem)
return invoice
def notes(self):
"""Fetch Notes for this account."""
url = urljoin(self._url, '%s/notes' % self.account_code)
return Note.paginated(url)
def redemption(self):
try:
return self.redemptions()[0]
except AttributeError:
raise AttributeError("redemption")
def reopen(self):
"""Reopen a closed account."""
url = urljoin(self._url, '%s/reopen' % self.account_code)
response = self.http_request(url, 'PUT')
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def subscribe(self, subscription):
"""Create the given `Subscription` for this existing account."""
url = urljoin(self._url, '%s/subscriptions' % self.account_code)
return subscription.post(url)
def update_billing_info(self, billing_info):
"""Change this account's billing information to the given `BillingInfo`."""
url = urljoin(self._url, '%s/billing_info' % self.account_code)
response = billing_info.http_request(url, 'PUT', billing_info,
{'Content-Type': 'application/xml; charset=utf-8'})
if response.status == 200:
pass
elif response.status == 201:
billing_info._url = response.getheader('Location')
else:
billing_info.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
billing_info.update_from_element(ElementTree.fromstring(response_xml))
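# A sketch of typical Account usage built from the methods above (the account
# code, plan code, token, and amounts are hypothetical; `get` comes from the
# shared Resource base class):
#
#     account = Account.get('acct-123')
#     account.charge(Adjustment(unit_amount_in_cents=500, currency='USD',
#                               description='Setup fee', quantity=1))
#     invoice = account.invoice()   # invoice any outstanding adjustments
#     account.update_billing_info(BillingInfo(token_id='tok-abc'))
#     account.subscribe(Subscription(plan_code='gold', currency='USD'))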
class BillingInfo(Resource):
"""A set of billing information for an account."""
nodename = 'billing_info'
attributes = (
'type',
'name_on_account',
'first_name',
'last_name',
'number',
'verification_value',
'year',
'month',
'start_month',
'start_year',
'issue_number',
'company',
'address1',
'address2',
'city',
'state',
'zip',
'country',
'phone',
'vat_number',
'ip_address',
'ip_address_country',
'card_type',
'first_six',
'last_four',
'paypal_billing_agreement_id',
'amazon_billing_agreement_id',
'token_id',
'account_type',
'routing_number',
'account_number',
)
sensitive_attributes = ('number', 'verification_value', 'account_number')
xml_attribute_attributes = ('type',)
class Coupon(Resource):
"""A coupon for a customer to apply to their account."""
member_path = 'coupons/%s'
collection_path = 'coupons'
nodename = 'coupon'
attributes = (
'coupon_code',
'name',
'discount_type',
'discount_percent',
'discount_in_cents',
'redeem_by_date',
'invoice_description',
'single_use',
'applies_for_months',
'duration',
'temporal_unit',
'temporal_amount',
'max_redemptions',
'applies_to_all_plans',
'applies_to_non_plan_charges',
'redemption_resource',
'created_at',
'plan_codes',
'hosted_description',
'max_redemptions_per_account',
)
@classmethod
def value_for_element(cls, elem):
if not elem or elem.tag != 'plan_codes' or elem.attrib.get('type') != 'array':
return super(Coupon, cls).value_for_element(elem)
return [code_elem.text for code_elem in elem]
@classmethod
def element_for_value(cls, attrname, value):
if attrname != 'plan_codes':
return super(Coupon, cls).element_for_value(attrname, value)
elem = ElementTree.Element(attrname)
elem.attrib['type'] = 'array'
for code in value:
code_el = ElementTree.Element('plan_code')
code_el.text = code
elem.append(code_el)
return elem
@classmethod
def all_redeemable(cls, **kwargs):
"""Return a `Page` of redeemable coupons.
This is a convenience method for `Coupon.all(state='redeemable')`.
"""
return cls.all(state='redeemable', **kwargs)
@classmethod
def all_expired(cls, **kwargs):
"""Return a `Page` of expired coupons.
This is a convenience method for `Coupon.all(state='expired')`.
"""
return cls.all(state='expired', **kwargs)
@classmethod
def all_maxed_out(cls, **kwargs):
"""Return a `Page` of coupons that have been used the maximum
number of times.
This is a convenience method for `Coupon.all(state='maxed_out')`.
"""
return cls.all(state='maxed_out', **kwargs)
def has_unlimited_redemptions_per_account(self):
return self.max_redemptions_per_account is None
class Redemption(Resource):
"""A particular application of a coupon to a customer account."""
nodename = 'redemption'
attributes = (
'account_code',
'single_use',
'total_discounted_in_cents',
'subscription_uuid',
'currency',
'created_at',
)
def delete_url(self):
return self._url + "s/" + self.uuid
class TaxDetail(Resource):
"""A charge's tax breakdown"""
nodename = 'taxdetail'
inherits_currency = True
attributes = (
'name',
'type',
'tax_rate',
'tax_in_cents',
)
class Adjustment(Resource):
"""A charge or credit applied (or to be applied) to an account's invoice."""
nodename = 'adjustment'
member_path = 'adjustments/%s'
attributes = (
'uuid',
'description',
'accounting_code',
'quantity',
'unit_amount_in_cents',
'discount_in_cents',
'tax_in_cents',
'tax_type',
'tax_region',
'tax_rate',
'total_in_cents',
'currency',
'tax_exempt',
'tax_code',
'tax_details',
'start_date',
'end_date',
'created_at',
'type',
)
xml_attribute_attributes = ('type',)
_classes_for_nodename = {'tax_detail': TaxDetail,}
# This can be removed when the `original_adjustment_uuid` is moved to a link
def __getattr__(self, name):
if name == 'original_adjustment':
try:
uuid = super(Adjustment, self).__getattr__('original_adjustment_uuid')
except (AttributeError):
return super(Adjustment, self).__getattr__(name)
return lambda: Adjustment.get(uuid)
else:
return super(Adjustment, self).__getattr__(name)
class Invoice(Resource):
"""A payable charge to an account for the customer's charges and
subscriptions."""
member_path = 'invoices/%s'
collection_path = 'invoices'
nodename = 'invoice'
attributes = (
'uuid',
'state',
'invoice_number',
'invoice_number_prefix',
'po_number',
'vat_number',
'subtotal_in_cents',
'tax_in_cents',
'tax_type',
'tax_rate',
'total_in_cents',
'currency',
'created_at',
'line_items',
'transactions',
'terms_and_conditions',
'customer_notes',
'address',
'closed_at',
)
blacklist_attributes = (
'currency',
)
def invoice_number_with_prefix(self):
return '%s%s' % (self.invoice_number_prefix, self.invoice_number)
@classmethod
def all_open(cls, **kwargs):
"""Return a `Page` of open invoices.
This is a convenience method for `Invoice.all(state='open')`.
"""
return cls.all(state='open', **kwargs)
@classmethod
def all_collected(cls, **kwargs):
"""Return a `Page` of collected invoices.
This is a convenience method for `Invoice.all(state='collected')`.
"""
return cls.all(state='collected', **kwargs)
@classmethod
def all_failed(cls, **kwargs):
"""Return a `Page` of failed invoices.
This is a convenience method for `Invoice.all(state='failed')`.
"""
return cls.all(state='failed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due invoices.
This is a convenience method for `Invoice.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
@classmethod
def pdf(cls, uuid):
"""Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
with open('invoice.pdf', 'w') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
"""
url = urljoin(base_uri(), cls.member_path % (uuid,))
pdf_response = cls.http_request(url, headers={'Accept': 'application/pdf'})
return pdf_response.read()
def refund_amount(self, amount_in_cents, refund_apply_order = 'credit'):
amount_element = self.refund_open_amount_xml(amount_in_cents, refund_apply_order)
return self._create_refund_invoice(amount_element)
def refund(self, adjustments, refund_apply_order = 'credit'):
adjustments_element = self.refund_line_items_xml(adjustments, refund_apply_order)
return self._create_refund_invoice(adjustments_element)
def refund_open_amount_xml(self, amount_in_cents, refund_apply_order):
elem = ElementTree.Element(self.nodename)
elem.append(Resource.element_for_value('refund_apply_order', refund_apply_order))
elem.append(Resource.element_for_value('amount_in_cents',
amount_in_cents))
return elem
def refund_line_items_xml(self, line_items, refund_apply_order):
elem = ElementTree.Element(self.nodename)
elem.append(Resource.element_for_value('refund_apply_order', refund_apply_order))
line_items_elem = ElementTree.Element('line_items')
for item in line_items:
adj_elem = ElementTree.Element('adjustment')
adj_elem.append(Resource.element_for_value('uuid',
item['adjustment'].uuid))
adj_elem.append(Resource.element_for_value('quantity',
item['quantity']))
adj_elem.append(Resource.element_for_value('prorate', item['prorate']))
line_items_elem.append(adj_elem)
elem.append(line_items_elem)
return elem
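# Each entry in `line_items` above is expected to be a dict with 'adjustment'
# (an Adjustment instance), 'quantity', and 'prorate' keys, e.g. (a sketch with
# a hypothetical adjustment):
#
#     invoice.refund([{'adjustment': adjustment, 'quantity': 1, 'prorate': False}])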
def _create_refund_invoice(self, element):
url = urljoin(self._url, '%s/refund' % (self.invoice_number, ))
body = ElementTree.tostring(element, encoding='UTF-8')
refund_invoice = Invoice()
refund_invoice.post(url, body)
return refund_invoice
def redemption(self):
try:
return self.redemptions()[0]
except AttributeError:
raise AttributeError("redemption")
class Subscription(Resource):
"""A customer account's subscription to your service."""
member_path = 'subscriptions/%s'
collection_path = 'subscriptions'
nodename = 'subscription'
attributes = (
'uuid',
'state',
'plan_code',
'coupon_code',
'coupon_codes',
'quantity',
'activated_at',
'canceled_at',
'starts_at',
'expires_at',
'current_period_started_at',
'current_period_ends_at',
'trial_started_at',
'trial_ends_at',
'unit_amount_in_cents',
'tax_in_cents',
'tax_type',
'tax_rate',
'total_billing_cycles',
'remaining_billing_cycles',
'timeframe',
'currency',
'subscription_add_ons',
'account',
'pending_subscription',
'net_terms',
'collection_method',
'po_number',
'first_renewal_date',
'bulk',
'terms_and_conditions',
'customer_notes',
'vat_reverse_charge_notes',
'bank_account_authorized_at',
'redemptions',
)
sensitive_attributes = ('number', 'verification_value', 'bulk')
def preview(self):
if hasattr(self, '_url'):
url = self._url + '/preview'
return self.post(url)
else:
url = urljoin(recurly.base_uri(), self.collection_path) + '/preview'
return self.post(url)
def update_notes(self, **kwargs):
"""Updates the notes on the subscription without generating a change"""
for key, val in kwargs.items():
setattr(self, key, val)
url = urljoin(self._url, '%s/notes' % self.uuid)
self.put(url)
def _update(self):
if not hasattr(self, 'timeframe'):
self.timeframe = 'now'
return super(Subscription, self)._update()
def __getpath__(self, name):
if name == 'plan_code':
return 'plan/plan_code'
else:
return name
class TransactionBillingInfo(recurly.Resource):
node_name = 'billing_info'
attributes = (
'first_name',
'last_name',
'address1',
'address2',
'city',
'state',
'country',
'zip',
'phone',
'vat_number',
'first_six',
'last_four',
'card_type',
'month',
'year',
'transaction_uuid',
)
class TransactionAccount(recurly.Resource):
node_name = 'account'
attributes = (
'first_name',
'last_name',
'company',
'email',
'account_code',
)
_classes_for_nodename = {'billing_info': TransactionBillingInfo}
class TransactionDetails(recurly.Resource):
node_name = 'details'
attributes = ('account',)
_classes_for_nodename = {'account': TransactionAccount}
class TransactionError(recurly.Resource):
node_name = 'transaction_error'
attributes = (
'id',
'merchant_message',
'error_category',
'customer_message',
'error_code',
'gateway_error_code',
)
class Transaction(Resource):
"""An immediate one-time charge made to a customer's account."""
member_path = 'transactions/%s'
collection_path = 'transactions'
nodename = 'transaction'
attributes = (
'uuid',
'action',
'account',
'currency',
'amount_in_cents',
'tax_in_cents',
'status',
'reference',
'test',
'voidable',
'description',
'refundable',
'cvv_result',
'avs_result',
'avs_result_street',
'avs_result_postal',
'created_at',
'details',
'transaction_error',
'type',
'ip_address',
'tax_exempt',
'tax_code',
'accounting_code',
)
xml_attribute_attributes = ('type',)
sensitive_attributes = ('number', 'verification_value',)
_classes_for_nodename = {
'details': TransactionDetails,
'transaction_error': TransactionError
}
def _handle_refund_accepted(self, response):
if response.status != 202:
self.raise_http_error(response)
self._refund_transaction_url = response.getheader('Location')
return self
def get_refund_transaction(self):
"""Retrieve the refund transaction for this transaction, immediately
after refunding.
After calling `refund()` to refund a transaction, call this method to
retrieve the new transaction representing the refund.
"""
try:
url = self._refund_transaction_url
except AttributeError:
raise ValueError("No refund transaction is available for this transaction")
resp, elem = self.element_for_url(url)
value = self.value_for_element(elem)
return value
def refund(self, **kwargs):
"""Refund this transaction.
Calling this method returns the refunded transaction (that is,
``self``) if the refund was successful, or raises a `ResponseError` if
an error occurred requesting the refund. After a successful call to
`refund()`, to retrieve the new transaction representing the refund,
use the `get_refund_transaction()` method.
"""
# Find the URL and method to refund the transaction.
try:
selfnode = self._elem
except AttributeError:
raise AttributeError('refund')
url, method = None, None
for anchor_elem in selfnode.findall('a'):
if anchor_elem.attrib.get('name') == 'refund':
url = anchor_elem.attrib['href']
method = anchor_elem.attrib['method'].upper()
if url is None or method is None:
raise AttributeError("refund") # should do something more specific probably
actionator = self._make_actionator(url, method, extra_handler=self._handle_refund_accepted)
return actionator(**kwargs)
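# A sketch of the refund flow described in refund() and get_refund_transaction()
# (the UUID is hypothetical; any keyword arguments are forwarded to the refund link):
#
#     transaction = Transaction.get('a13acd8fe4294916b79aec87b7ea441f')
#     transaction.refund()
#     refund_transaction = transaction.get_refund_transaction()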
Transaction._classes_for_nodename['transaction'] = Transaction
class Plan(Resource):
"""A service level for your service to which a customer account
can subscribe."""
member_path = 'plans/%s'
collection_path = 'plans'
nodename = 'plan'
attributes = (
'plan_code',
'name',
'description',
'success_url',
'cancel_url',
'display_donation_amounts',
'display_quantity',
'display_phone_number',
'bypass_hosted_confirmation',
'unit_name',
'payment_page_tos_link',
'plan_interval_length',
'plan_interval_unit',
'trial_interval_length',
'trial_interval_unit',
'accounting_code',
'setup_fee_accounting_code',
'created_at',
'tax_exempt',
'tax_code',
'unit_amount_in_cents',
'setup_fee_in_cents',
'total_billing_cycles',
)
def get_add_on(self, add_on_code):
"""Return the `AddOn` for this plan with the given add-on code."""
url = urljoin(self._url, '%s/add_ons/%s' % (self.plan_code, add_on_code))
resp, elem = AddOn.element_for_url(url)
return AddOn.from_element(elem)
def create_add_on(self, add_on):
"""Make the given `AddOn` available to subscribers on this plan."""
url = urljoin(self._url, '%s/add_ons' % self.plan_code)
return add_on.post(url)
class AddOn(Resource):
"""An additional benefit a customer subscribed to a particular plan
can also subscribe to."""
nodename = 'add_on'
attributes = (
'add_on_code',
'name',
'display_quantity_on_hosted_page',
'display_quantity',
'default_quantity',
'accounting_code',
'unit_amount_in_cents',
'tax_code',
'created_at',
)
class SubscriptionAddOn(Resource):
"""A plan add-on as added to a customer's subscription.
Use these instead of `AddOn` instances when specifying a
`Subscription` instance's `subscription_add_ons` attribute.
"""
nodename = 'subscription_add_on'
inherits_currency = True
attributes = (
'add_on_code',
'quantity',
'unit_amount_in_cents',
'address',
)
class Note(Resource):
"""A customer account's notes."""
nodename = 'note'
collection_path = 'notes'
attributes = (
'message',
'created_at',
)
@classmethod
def from_element(cls, elem):
new_note = Note()
for child_el in elem:
if not child_el.tag:
continue
setattr(new_note, child_el.tag, child_el.text)
return new_note
Resource._learn_nodenames(locals().values())
def objects_for_push_notification(notification):
"""Decode a push notification with the given body XML.
Returns a dictionary containing the constituent objects of the push
notification. The kind of push notification is given in the ``"type"``
member of the returned dictionary.
"""
notification_el = ElementTree.fromstring(notification)
objects = {'type': notification_el.tag}
for child_el in notification_el:
tag = child_el.tag
res = Resource.value_for_element(child_el)
objects[tag] = res
return objects
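# A usage sketch for objects_for_push_notification (the notification body and the
# presence of an 'account' element depend on the notification type Recurly sends):
#
#     objects = objects_for_push_notification(request_body_xml)
#     if objects['type'] == 'new_account_notification':
#         account = objects['account']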
|
{
"content_hash": "98c4cd83c6e449f77e576abc33003efe",
"timestamp": "",
"source": "github",
"line_count": 952,
"max_line_length": 99,
"avg_line_length": 27.98424369747899,
"alnum_prop": 0.5847753462707856,
"repo_name": "tbartelmess/recurly-client-python",
"id": "77ea59fadc59781f92a0b3723997770b4db2af19",
"size": "26641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recurly/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124761"
}
],
"symlink_target": ""
}
|
"""Import linear python op for backward compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
linear = tf.nn.linear
|
{
"content_hash": "fd16fa2ec8ad7d17d6893967c92c5b34",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 57,
"avg_line_length": 26.875,
"alnum_prop": 0.7581395348837209,
"repo_name": "moonboots/tensorflow",
"id": "b711fca170b8daec26cdb7849739b4069719773f",
"size": "892",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/models/rnn/linear.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151546"
},
{
"name": "C++",
"bytes": "7303140"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "678043"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16098"
},
{
"name": "Jupyter Notebook",
"bytes": "777976"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "101760"
},
{
"name": "Python",
"bytes": "4092357"
},
{
"name": "Shell",
"bytes": "77957"
},
{
"name": "TypeScript",
"bytes": "328860"
}
],
"symlink_target": ""
}
|
'''
Pypelib Database settings for SQLAlchemy
@author: SergioVidiella
'''
#
# Database parameters.
# DATABASE_HOST: it is usually 'localhost' (or 127.0.0.1).
# DATABASE_DIALECT: 'mysql', 'sqlite', 'postgresql', etc...
# DATABASE_DRIVER: use the form '+driver', e.g. '+psycopg2'. If no driver is used, leave it empty.
#
DATABASE_DIALECT = "changeMe"
DATABASE_DRIVER = ""
DATABASE_NAME = "changeMe"
DATABASE_USER = "changeMe"
DATABASE_PASSWORD = "changeMe"
DATABASE_HOST = "changeMe"
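#
# For reference, SQLAlchemy combines the values above into an engine URL of the
# form dialect+driver://user:password@host/name, e.g. (hypothetical values):
#   mysql+mysqldb://pypelib:secret@localhost/pypelib
#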
|
{
"content_hash": "5d684b413ee9c0e21fffb0bfb8289238",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 99,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.709278350515464,
"repo_name": "ict-felix/stack",
"id": "1852acd041c8a2124696903812b96ff26f34e67d",
"size": "485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/resource/orchestrator/src/policies/pypelib/persistence/backends/sqlalchemy/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
}
|
from flask import request
from flask.ext.classy import FlaskView, route
from sqlalchemy.exc import IntegrityError
from smsgw.models import Tag
from smsgw.lib.utils import response
from smsgw.resources import decorators
from smsgw.resources.tags.schemas import post, put
from smsgw.resources.error.api import ErrorResource
from smsgw.core import db
class TagsResource(FlaskView):
""" Tags endpoints """
route_base = '/users/<uuid:user_uuid>/tags/'
@decorators.auth()
def index(self, **kwargs):
"""
Returning list of tags for specific user
"""
user = kwargs.get('user')
# search or not
search = request.args.get('search')
tags = user.tags
if search is not None:
like = "%{0}%".format(search)
tags = tags.filter(Tag._label.like(like))
tags = tags.order_by(Tag._label.asc())
return response([tag.to_dict() for tag in tags.all()])
@route('/<uuid:tag_uuid>/')
@decorators.auth()
def get(self, **kwargs):
"""
Get user tag
"""
tag = kwargs.get('tag')
return response(tag.to_dict())
@decorators.auth()
@decorators.jsonschema_validate(post.schema)
def post(self, **kwargs):
"""
Creating user tag
"""
try:
# save tag
user = kwargs.get('user')
tag = Tag(userId=user.id, **request.json)
db.session.add(tag)
db.session.commit()
except IntegrityError:
db.session.rollback()
raise ErrorResource(409, message="Tag already exists.")
return response(tag.to_dict(), status_code=201)
@route('/<uuid:tag_uuid>/', methods=['PUT'])
@decorators.auth()
@decorators.jsonschema_validate(put.schema)
def put(self, **kwargs):
"""
Updating user tag
"""
try:
# save to db
tag = kwargs.get('tag')
tag.update(request.json)
db.session.commit()
except IntegrityError:
db.session.rollback()
raise ErrorResource(409, message="Tag already exists.")
return response(tag.to_dict())
@route('/<uuid:tag_uuid>/', methods=['DELETE'])
@decorators.auth()
def delete(self, **kwargs):
"""
Delete user tag
"""
tag = kwargs.get('tag')
# delete template
db.session.delete(tag)
db.session.commit()
return response(tag.to_dict())
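# For reference, the routes above expose the following endpoints (a sketch;
# the UUIDs are placeholders and every request must pass the auth decorator):
#
#   GET    /users/<user_uuid>/tags/?search=work    list tags matching "work"
#   GET    /users/<user_uuid>/tags/<tag_uuid>/     fetch a single tag
#   POST   /users/<user_uuid>/tags/                create a tag (returns 201)
#   PUT    /users/<user_uuid>/tags/<tag_uuid>/     update a tag
#   DELETE /users/<user_uuid>/tags/<tag_uuid>/     delete a tag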
|
{
"content_hash": "4cab72e8611e4457c760c87e48bce739",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 70,
"avg_line_length": 25.928571428571427,
"alnum_prop": 0.5694608421881149,
"repo_name": "VojtechBartos/smsgw",
"id": "cd73b72af596771d2946102103765cc458eba565",
"size": "2631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smsgw/resources/tags/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34680"
},
{
"name": "HTML",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "143152"
},
{
"name": "Makefile",
"bytes": "212"
},
{
"name": "Mako",
"bytes": "526"
},
{
"name": "Python",
"bytes": "189569"
},
{
"name": "Shell",
"bytes": "464"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
import warnings
from cinder import objects
# NOTE(geguileo): Sphinx will fail to generate the documentation if we are
# using decorators from any OVO in cinder.objects, because the OVOs are only
# added to the cinder.objects namespace when the CLI programs are run. So we
# need to run it here as well to avoid failures like:
# AttributeError: 'module' object has no attribute 'Volume'
objects.register_all()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'oslosphinx',
'stevedore.sphinxext',
'oslo_config.sphinxconfiggen',
'ext.cinder_driverlist',
]
config_generator_config_file = (
'../../cinder/config/cinder-config-generator.conf')
sample_config_basename = '_static/cinder'
# autodoc generation is a bit aggressive and a nuisance
# when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1"
# in your terminal to disable
if not os.getenv('SPHINX_DEBUG'):
extensions += ['ext.cinder_autodoc']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cinder'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from cinder.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = [
'api_ext/rst_extension_template',
'installer',
]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['cinder.']
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('man/cinder-manage', 'cinder-manage', u'Cloud controller fabric',
[u'OpenStack'], 1)
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'cinderdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Cinder.tex', u'Cinder Documentation',
u'Anso Labs, LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
|
{
"content_hash": "5825270b52f3c0655410ff5702c1c38d",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 79,
"avg_line_length": 33.12449799196787,
"alnum_prop": 0.6921677982541222,
"repo_name": "NetApp/cinder",
"id": "f47fc2fee87e284a84d3455705e26c8f7a55aea5",
"size": "9214",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17587090"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
__author__ = 'sstober'
import logging
log = logging.getLogger(__name__)
from pylearn2.utils.timing import log_timing
from pylearn2.train_extensions import TrainExtension
from pylearn2.space import CompositeSpace
import theano
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
class RNNMonitor(TrainExtension):
def __init__(self, model, dataset):
self.model = model
self.dataset = dataset
self.__dict__.update(locals())
def setup(self, model, dataset, algorithm):
self.data_specs = (CompositeSpace((
model.get_input_space(),
model.get_output_space())),
("features", "targets"))
minibatch = self.model.get_input_space().make_theano_batch()
# self.activation_fn = theano.function(
# inputs=[minibatch], outputs=self.model.fprop(minibatch, return_all=True))
self.output_fn = theano.function(
inputs=[minibatch], outputs=self.model.fprop(minibatch))
def on_monitor(self, model, dataset, algorithm):
# it = self.dataset.iterator('sequential', batch_size=1, data_specs=self.data_specs)
# y_real, y_pred, output = process_dataset(self.model,
# self.dataset,
# data_specs=self.data_specs,
# output_fn=self.output_fn,
# batch_size=128)
it = dataset.iterator(mode='sequential',
batch_size=128,
data_specs=self.data_specs)
y_pred = []
y_real = []
output = []
for minibatch, target in it:
# note: axis 0 and 1 are swapped
# frame_size, *, n_classes -> *, frame_size, n_classes
target = target.swapaxes(0,1)
out = self.output_fn(minibatch).swapaxes(0,1)
output.append(out)
# print out
# print out.shape
# print target.shape
y_pred.append(np.argmax(out, axis = 2))
y_real.append(np.argmax(target, axis = 2))
# print output[-1].shape
# print y_pred[-1].shape
# print y_real[-1].shape
y_pred = np.vstack(y_pred)
# print y_pred.shape
y_real = np.vstack(y_real)
# print y_real.shape
output = np.vstack(output)
y_pred = y_pred.flatten()
y_real = y_real.flatten()
# Compute confusion matrix
# print classification_report(y_real, y_pred)
cm = confusion_matrix(y_real, y_pred)
log.info('confusion\n{}'.format(cm))
print classification_report(y_real, y_pred)
class DataDumper(TrainExtension):
def __init__(self, model, dataset):
self.model = model
self.dataset = dataset
self.__dict__.update(locals())
def setup(self, model, dataset, algorithm):
# self.data_specs = (CompositeSpace((
# model.get_input_space(),
# model.get_output_space())),
# ("features", "targets"))
self.data_specs = self.dataset.get_data_specs()
print self.data_specs
it = dataset.iterator(mode='sequential',
batch_size=1,
return_tuple=True,
data_specs=self.data_specs)
i = 0
for sequence, target in it:
print '{}: {} -> {}'.format(i, sequence, target)
# print np.asarray(sequence, dtype=np.int)
# print np.asarray(target, dtype=np.int)
i += 1
break
minibatch = self.model.get_input_space().make_theano_batch()
# self.activation_fn = theano.function(
# inputs=[minibatch], outputs=self.model.fprop(minibatch, return_all=True))
self.output_fn = theano.function(
inputs=[minibatch], outputs=self.model.fprop(minibatch))
def on_monitor(self, model, dataset, algorithm):
it = dataset.iterator(mode='sequential',
batch_size=1,
return_tuple=True, # otherwise: "too many value to unpack"
data_specs=self.data_specs)
i = 0
for minibatch, target in it:
# note: axis 0 and 1 are swapped
# frame_size, *, n_classes -> *, frame_size, n_classes
target = target.swapaxes(0,1)
out = self.output_fn(minibatch).swapaxes(0,1)
print '{}: {} ->\n{}\t expected {}'.format(i, minibatch, out, target)
# print np.asarray(target, dtype=np.int)
# print out.shape
i += 1
if i > 5: break
|
{
"content_hash": "3853d3b6dcc42f53d509bd05dd732c06",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 92,
"avg_line_length": 32.82432432432432,
"alnum_prop": 0.5282009057225195,
"repo_name": "sstober/deepthought",
"id": "06f327da3860edad724a6b317df927cbb209e653",
"size": "4858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepthought/pylearn2ext/monitor/rnn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56147125"
},
{
"name": "PureBasic",
"bytes": "2515"
},
{
"name": "Python",
"bytes": "446130"
},
{
"name": "Shell",
"bytes": "4966"
}
],
"symlink_target": ""
}
|
import angr
import logging
l = logging.getLogger('angr.procedures.stubs.CallReturn')
class CallReturn(angr.SimProcedure):
NO_RET = True
def run(self):
l.info("A factory.call_state-created path returned!")
return
|
{
"content_hash": "410441a2a41907252ed9cfc83453ee26",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 61,
"avg_line_length": 21.727272727272727,
"alnum_prop": 0.694560669456067,
"repo_name": "tyb0807/angr",
"id": "23c9380bc1f29534ab9685017fe26d23ae2676d3",
"size": "239",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "angr/procedures/stubs/CallReturn.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39875"
},
{
"name": "Makefile",
"bytes": "610"
},
{
"name": "Python",
"bytes": "3884780"
}
],
"symlink_target": ""
}
|
""" Utilities to read namelist file
Originally adapted from
https://github.com/leifdenby/namelist_python
"""
from __future__ import print_function
import re
import warnings
from itertools import groupby
from .parameters import Param, Params
CONVERT_LOWERCASE = True
class Namelist(Params):
""" Parse / Format method specific to Namelist
"""
@classmethod
def parse(cls, string):
try:
params = _parse_nml(string)
except Exception:
warnings.warn("some characters in the comments (likely / or \\) break namelist parsing; discarding all comments")
params = _parse_nml(string, ignore_comments=True)
return cls(params)
def format(self):
return _format_nml(self)
# Handy method to get/set param
def get_value(self, group, name):
""" access like a dict
"""
i = self.index(Param(name, group=group))
return self[i].value
def set_value(self, group, name, value):
""" access like a dict
"""
i = self.index(Param(name, group=group))
self[i].value = value
#
# Work for Namelist parsing and conversion
#
def _parse_nml(string, ignore_comments=False):
""" parse a string namelist, and returns a list of params
with four keys: name, value, help, group
"""
group_re = re.compile(r'&([^&]+)/', re.DOTALL) # allow blocks to span multiple lines
array_re = re.compile(r'(\w+)\((\d+)\)')
# string_re = re.compile(r"\'\s*\w[^']*\'")
string_re = re.compile(r"[\'\"]*[\'\"]")
# self._complex_re = re.compile(r'^\((\d+.?\d*),(\d+.?\d*)\)$')
# list of parameters
params = Params()
# groups = odict()
filtered_lines = []
for line in string.split('\n'):
line = line.strip()
if line == "":
continue
# remove comments, since they may have forward-slashes
# set ignore_comments to True is you want to keep them.
if line.startswith('!'):
continue
if ignore_comments and '!' in line:
line = line[:line.index('!')]
filtered_lines.append(line)
group_blocks = re.findall(group_re, "\n".join(filtered_lines))
for i, group_block in enumerate(group_blocks):
group_lines = group_block.split('\n')
group_name = group_lines.pop(0).strip()
# check for comments
if "!" in group_name:
i = group_name.index("!")
group_name = group_name[:i].strip()
group_help = group_name[i+1:].strip()
# some lines are continuation of previous lines: filter
joined_lines = []
for line in group_lines:
line = line.strip()
if '=' in line:
joined_lines.append(line)
else:
# continuation of previous line
joined_lines[-1] += line
group_lines = joined_lines
for line in group_lines:
name, value, comment = _parse_line(line)
param = {
"name": name,
"value": value,
"help": comment,
"group": group_name,
}
param = Param(**param)
# group[variable_name] = parsed_value
params.append(param)
# groups[group_name] = group
return params
def _parse_line(line):
"parse a line within a block"
# commas at the end of lines seem to be optional
comment = ""
if '!' in line:
sep = line.index("!")
comment = line[sep+1:].strip()
line = line[:sep].strip()
if line.endswith(','):
line = line[:-1]
k, v = line.split('=')
name = k.strip()
value = _parse_value(v.strip())
return name, value, comment
def _parse_value(variable_value):
"""
Tries to parse a single value, raises an exception if no single value is matched
"""
try:
parsed_value = int(variable_value)
except ValueError:
try:
parsed_value = float(variable_value)
except ValueError:
if variable_value.lower() in ['.true.', 't', 'true']:
# boolean
parsed_value = True
elif variable_value.lower() in ['.false.', 'f', 'false']:
parsed_value = False
elif variable_value.startswith("'") \
and variable_value.endswith("'") \
and variable_value.count("'") == 2 \
or variable_value.startswith('"') \
and variable_value.endswith('"') \
and variable_value.count('"') == 2:
parsed_value = variable_value[1:-1]
elif variable_value.startswith("/") and variable_value.endswith("/"):
# array /3,4,5/
parsed_value = _parse_array(variable_value[1:-1].split(','))
elif "," in variable_value:
# array 3, 4, 5
parsed_value = _parse_array(variable_value.split(','))
elif '*' in variable_value:
# 3*4 means [4, 4, 4] ==> this is handled in _parse_array
parsed_value = _parse_array([variable_value])
elif len(variable_value.split()) > 1:
# array 3 4 5
parsed_value = _parse_array(variable_value.split())
else:
print("Parsing ERROR: >>>{}<<<".format(variable_value))
raise ValueError(variable_value)
return parsed_value
def _parse_array(values):
""" parse a list of (string) values representing a fortran array
and return a python list
"""
assert type(values) is list
parsed_value = []
for v in values:
if '*' in v:
# 3* "a" === "a", "a", "a"
mult, val = v.split('*')
parsed_value.extend(int(mult) * [ _parse_value(val.strip()) ])
else:
parsed_value.append(_parse_value(v))
return parsed_value
def _format_nml(params):
""" format a flat parameter list to be written in the namelist
"""
lines = []
for group_name, group_params in groupby(params, lambda x: x.group):
if group_name == "":
print(list(group_params))
raise ValueError("Group not defined. Cannot write to namelist.")
lines.append("&%s" % group_name)
for param in group_params:
if isinstance(param.value, list):
line = " %s = %s" % (param.name, " ".join([_format_value(v) for v in param.value]))
else:
line = " %s = %s" % (param.name, _format_value(param.value))
if param.help:
line += ' ! '+param.help
lines.append(line)
lines.append("/\n")
return "\n".join(lines)
def _format_value(value):
""" Format a value into fortran's namelist format (return a string)
"""
if isinstance(value, bool):
return value and '.true.' or '.false.'
elif isinstance(value, int):
return "%d" % value
elif isinstance(value, float):
# return "{:.3e}".format(value) # use exp. notation after 3 digits
return "{}".format(value) # use exp. notation after 3 digits
elif isinstance(value, basestring):
return "'%s'" % value
elif isinstance(value, complex):
return "(%s,%s)" % (_format_value(value.real), _format_value(value.imag))
else:
raise Exception("Variable type not understood: %s" % type(value))
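# A small usage sketch (assuming the Param/Params helpers behave as used above;
# the group and variable names are made up):
#
#     text = """
#     &time
#        dt = 0.5        ! time step
#        levels = 3*10   ! expands to [10, 10, 10]
#     /
#     """
#     nml = Namelist.parse(text)
#     nml.get_value('time', 'dt')       # -> 0.5
#     nml.get_value('time', 'levels')   # -> [10, 10, 10]
#     print(nml.format())               # renders the namelist back to text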
|
{
"content_hash": "d2ea50f66182edc8254d26b053e840b3",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 115,
"avg_line_length": 34.11981566820276,
"alnum_prop": 0.5436250675310643,
"repo_name": "perrette/pyglacier",
"id": "ee3e155cc232e0b2a71a494b40c75710ec0af31b",
"size": "7404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglacier/namelist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67188"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
import unittest
from datetime import date
import holidays
class TestRussia(unittest.TestCase):
def setUp(self):
self.holidays = holidays.RU()
def test_before_2005(self):
self.assertIn(date(2004, 11, 7), self.holidays)
self.assertNotIn(date(2004, 11, 4), self.holidays)
def test_2018(self):
# https://en.wikipedia.org/wiki/Public_holidays_in_Russia
self.assertIn(date(2018, 1, 1), self.holidays)
self.assertIn(date(2018, 1, 2), self.holidays)
self.assertIn(date(2018, 1, 3), self.holidays)
self.assertIn(date(2018, 1, 4), self.holidays)
self.assertIn(date(2018, 1, 5), self.holidays)
self.assertIn(date(2018, 1, 6), self.holidays)
self.assertIn(date(2018, 1, 7), self.holidays)
self.assertIn(date(2018, 1, 8), self.holidays)
self.assertIn(date(2018, 2, 23), self.holidays)
self.assertIn(date(2018, 3, 8), self.holidays)
self.assertIn(date(2018, 5, 1), self.holidays)
self.assertIn(date(2018, 5, 9), self.holidays)
self.assertIn(date(2018, 6, 12), self.holidays)
self.assertIn(date(2018, 11, 4), self.holidays)
self.assertNotIn(date(2018, 11, 7), self.holidays)
|
{
"content_hash": "5cd7507941285ef3b45a4163a6b69071",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 38.4375,
"alnum_prop": 0.6390243902439025,
"repo_name": "ryanss/holidays.py",
"id": "e5825b61e3ec0c3edb1138af5f09641470fcd182",
"size": "1732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/countries/test_russia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214061"
}
],
"symlink_target": ""
}
|
from twisted.internet.defer import inlineCallbacks, returnValue
from flask_restful import fields, marshal
from floranet.models.model import Model
class Reflector(Model):
"""LoRa reflector application server interface
This appserver interface bounces any messages received
from a device back to that device.
Attributes:
name (str): Application interface name
started (bool): Started flag
"""
TABLENAME = 'appif_reflector'
HASMANY = [{'name': 'appinterfaces', 'class_name': 'AppInterface', 'as': 'interfaces'}]
def afterInit(self):
self.started = False
self.appinterface = None
@inlineCallbacks
def start(self, netserver):
"""Start the application interface
Args:
netserver (NetServer): The LoRa network server
Returns True on success, False otherwise
"""
self.netserver = netserver
self.started = True
returnValue(True)
yield
def stop(self):
"""Stop the application interface"""
# Reflector does not require any shutdown
self.started = False
return
@inlineCallbacks
def valid(self):
"""Validate a Reflector object.
Returns:
valid (bool), message(dict): (True, empty) on success,
(False, error message dict) otherwise.
"""
returnValue((True, {}))
yield
def marshal(self):
"""Get REST API marshalled fields as an orderedDict
Returns:
OrderedDict of fields defined by marshal_fields
"""
marshal_fields = {
'type': fields.String(attribute='__class__.__name__'),
'id': fields.Integer(attribute='appinterface.id'),
'name': fields.String,
'started': fields.Boolean
}
return marshal(self, marshal_fields)
def netServerReceived(self, device, app, port, appdata):
"""Receive a application message from LoRa network server"""
# Send the message to the network server
self.netserver.inboundAppMessage(device.devaddr, appdata)
def datagramReceived(self, data, (host, port)):
"""Receive inbound application server data"""
pass
|
{
"content_hash": "4ce6a05cd171ca80fd241a5bbd54dcfc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 91,
"avg_line_length": 29.96153846153846,
"alnum_prop": 0.5952075310226786,
"repo_name": "Fluent-networks/floranet",
"id": "480267d7a2adbe70d8893fa65d29a2f91501c1ff",
"size": "2337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "floranet/appserver/reflector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "378157"
},
{
"name": "Shell",
"bytes": "485"
}
],
"symlink_target": ""
}
|
import argparse
import os
import dawn4py
from dawn4py.serialization import SIR, AST
from dawn4py.serialization import utils as serial_utils
from google.protobuf.json_format import MessageToJson, Parse
OUTPUT_NAME = "global_var_stencil"
OUTPUT_FILE = f"{OUTPUT_NAME}.cpp"
OUTPUT_PATH = f"{OUTPUT_NAME}.cpp"
def main(args: argparse.Namespace):
interval = serial_utils.make_interval(
AST.Interval.Start, AST.Interval.End, 0, 0)
body_ast = serial_utils.make_ast(
[
serial_utils.make_assignment_stmt(
serial_utils.make_field_access_expr("out", [0, 0, 0]),
serial_utils.make_binary_operator(serial_utils.make_var_access_expr(
"dt", is_external=True), "*", serial_utils.make_field_access_expr("in", [1, 0, 0])),
"="),
]
)
vertical_region_stmt = serial_utils.make_vertical_region_decl_stmt(
body_ast, interval, AST.VerticalRegion.Forward
)
globals = AST.GlobalVariableMap()
globals.map["dt"].double_value = 0.5
sir = serial_utils.make_sir(
OUTPUT_FILE,
AST.GridType.Value("Cartesian"),
[
serial_utils.make_stencil(
OUTPUT_NAME,
serial_utils.make_ast([vertical_region_stmt]),
[
serial_utils.make_field(
"in", serial_utils.make_field_dimensions_cartesian()),
serial_utils.make_field(
"out", serial_utils.make_field_dimensions_cartesian()),
],
)
],
global_variables=globals
)
# print the SIR
if args.verbose:
print(MessageToJson(sir))
# compile
code = dawn4py.compile(sir, backend=dawn4py.CodeGenBackend.CXXNaive)
# write to file
print(f"Writing generated code to '{OUTPUT_PATH}'")
with open(OUTPUT_PATH, "w") as f:
f.write(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a simple stencil with globals using Dawn compiler"
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Print the generated SIR",
)
main(parser.parse_args())
|
{
"content_hash": "b496c7c1c2c0dbb2bd677a5af0983e86",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 104,
"avg_line_length": 29.67948717948718,
"alnum_prop": 0.5805615550755939,
"repo_name": "MeteoSwiss-APN/dawn",
"id": "36b7d300866b0894f9603cf7d99736dc60f1d6c9",
"size": "2930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dawn/test/integration-test/dawn4py-tests/global_var.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3780402"
},
{
"name": "CMake",
"bytes": "125177"
},
{
"name": "Cuda",
"bytes": "73606"
},
{
"name": "Dockerfile",
"bytes": "4895"
},
{
"name": "Fortran",
"bytes": "2880"
},
{
"name": "Python",
"bytes": "265722"
},
{
"name": "Shell",
"bytes": "13029"
}
],
"symlink_target": ""
}
|
'''This program estimates the golden ratio from the ratio of consecutive Fibonacci numbers'''
a = 0
b = 1
c = a + b
ratio = float(c)/b
old_ratio = 0.0
i = 0
# iterate until successive estimates of the ratio agree to within 1e-10
while abs(ratio - old_ratio) > 1e-10:
old_ratio = ratio
i = i+1
a = b
b = c
c = a+b
ratio = float(c)/b
print 'Value of Golden ratio is %f after %d iterations' %(ratio,i)
|
{
"content_hash": "b18b3bfd73454fb0a0de77738bcc6150",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 21.25,
"alnum_prop": 0.6,
"repo_name": "fsbd1285228/PythonCodes",
"id": "28967d7d8446d5cb2ca12103edff143f9c750b1a",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonStarter/7_3_4 Fibonacci Golden Ratio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20086"
}
],
"symlink_target": ""
}
|
"""SunPy Maps"""
from __future__ import absolute_import
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
import os
import numpy as np
from sunpy.map.map import Map
from sunpy.map.header import MapHeader
from sunpy.map.mapcube import MapCube
from sunpy.map.compositemap import CompositeMap
from sunpy.map.sources import *
def make_map(*args, **kwargs):
"""Processes one or more inputs and returns a Map, MapCube, or CompositeMap
instance.
Parameters
----------
args : filepath(s), data array
The data source used to create the map object. This can be either a
filepath to an image, a 2d list, or an ndarray.
type : {'composite' | 'cube'}
Type of multimap to construct when passed more than one input. The
default choice is a CompositeMap which is more lenient with respect
to how similar the input data is.
Returns
-------
out : Map, MapCube, CompositeMap
Returns a subclass instance
Examples
--------
>>> import sunpy
>>> sunpy.make_map("file.fts")
>>> sunpy.make_map("file1.fts", "file2.fts",..)
>>> sunpy.make_map(["file1.fts", "file2.fts",..])
>>> sunpy.make_map("path/to/files/*.fts")
>>> sunpy.make_map(Map)
>>> sunpy.make_map(Map1, Map2,..)
>>> sunpy.make_map([[0, 1],[2, 3]], {'telescop': 'sunpy',..})
"""
if len(args) == 0:
raise TypeError("Invalid input.")
# First check to see if data/header were passed in
if isinstance(args[0], list) or isinstance(args[0], np.ndarray):
data = None
# n-dimensional list
if isinstance(args[0][0], list) or isinstance(args[0], np.ndarray):
data = args[0]
else:
try:
float(args[0][0])
except (ValueError, TypeError):
pass
else:
# 1-dimensional data
data = args[0]
# if either of the above cases hold, then create a new Map
if data is not None:
if len(args) > 1:
return Map(args[0], args[1])
else:
return Map(args[0], {})
# If not, check for one or more maps or filepaths
if len(args) == 1:
# String
if isinstance(args[0], basestring):
filepath = os.path.expanduser(args[0])
# Wildcard string
if filepath.find("*") != -1:
import glob
maps = glob.glob(filepath)
# Directory (use all files)
elif os.path.isdir(filepath):
maps = [os.path.join(filepath, x) for x in os.listdir(filepath)]
# Filepath
else:
return Map.read(filepath)
# Map/MapCube/CompositeMap
elif (isinstance(args[0], Map) or
isinstance(args[0], CompositeMap) or
isinstance(args[0], MapCube)):
return args[0]
# List of filepaths or Maps
elif isinstance(args[0], list):
# list of maps or filepaths
maps = args[0]
# Unrecognized input
else:
raise InvalidMapInput("Invalid input for make_map. Please specify "
"one or more filepaths, Maps, directories, "
"or wildcard expressions.")
else:
maps = args
# Make sure we found some data
    if len(maps) == 0:
raise NoMapsFound("Specified path contains no valid files.")
mtype = kwargs.get("type", "composite")
# MapCube
if mtype == "cube":
return MapCube(*maps)
# CompositeMap (default)
elif mtype == "composite":
return CompositeMap(*maps)
else:
raise InvalidMapType("Invalid multi-map type specified. Please choose "
"between 'composite' or 'cube'.")
def read_header(filepath):
"""Parses a file header and return some important parameters"""
return Map.read_header(filepath)
class InvalidMapInput(ValueError):
"""Exception to raise when input variable is not a Map instance and does
not point to a valid Map input file."""
pass
class InvalidMapType(ValueError):
"""Exception to raise when an invalid type of map is requested with make_map
"""
pass
class NoMapsFound(ValueError):
"""Exception to raise when input does not point to any valid maps or files
"""
pass
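# Illustrative sketch (not part of the original module): with several inputs, the
# 'type' keyword chooses which container make_map() builds. The file names below
# are hypothetical.
#
#     make_map("file1.fts", "file2.fts")                 # CompositeMap (default)
#     make_map("file1.fts", "file2.fts", type="cube")    # MapCube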
|
{
"content_hash": "30eda9adfd0d53b35f766c6b787dbf2c",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 31.923076923076923,
"alnum_prop": 0.5583789704271632,
"repo_name": "jslhs/sunpy",
"id": "2501a0978753bc51625a5150673fd962f500fcc4",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunpy/map/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
"""
test_permutive
----------------------------------
Tests for `permutive` module.
"""
import sys
import unittest
from permutive import Permutive
class TestPermutive(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_permutive(self):
permutive = Permutive('12345')
self.assertTrue(isinstance(permutive, Permutive))
|
{
"content_hash": "eb8a05a57497d63d78379c8b1f79a7f9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 15.76,
"alnum_prop": 0.6116751269035533,
"repo_name": "tailsdotcom/permutive-sdk",
"id": "a1ceee44c7b3bb7a8c8e654b4e83288fa1149fc9",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_permutive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "15763"
}
],
"symlink_target": ""
}
|
"""Generic testing tools.
Authors
-------
- Fernando Perez <Fernando.Perez@berkeley.edu>
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Copyright (C) 2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import inspect
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from io import StringIO
from subprocess import Popen, PIPE
try:
# These tools are used by parts of the runtime, so we make the nose
# dependency optional at this point. Nose is a hard dependency to run the
# test suite, but NOT to use ipython itself.
import nose.tools as nt
has_nose = True
except ImportError:
has_nose = False
from IPython.config.loader import Config
from IPython.utils.process import get_output_error_code
from IPython.utils.text import list_strings
from IPython.utils.io import temp_pyfile, Tee
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from . import decorators as dec
from . import skipdoctest
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
# The docstring for full_path doctests differently on win32 (different path
# separator) so just skip the doctest there. The example remains informative.
doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
@doctest_deco
def full_path(startPath,files):
"""Make full paths for all the listed files, based on startPath.
Only the base part of startPath is kept, since this routine is typically
used with a script's __file__ variable as startPath. The base of startPath
is then prepended to all the listed files, forming the output list.
Parameters
----------
startPath : string
Initial path to use as the base for the results. This path is split
using os.path.split() and only its first component is kept.
files : string or list
One or more files.
Examples
--------
>>> full_path('/foo/bar.py',['a.txt','b.txt'])
['/foo/a.txt', '/foo/b.txt']
>>> full_path('/foo',['a.txt','b.txt'])
['/a.txt', '/b.txt']
If a single file is given, the output is still a list:
>>> full_path('/foo','a.txt')
['/a.txt']
"""
files = list_strings(files)
base = os.path.split(startPath)[0]
return [ os.path.join(base,f) for f in files ]
def parse_test_output(txt):
"""Parse the output of a test run and return errors, failures.
Parameters
----------
txt : str
Text output of a test run, assumed to contain a line of one of the
following forms::
'FAILED (errors=1)'
'FAILED (failures=1)'
'FAILED (errors=1, failures=1)'
Returns
-------
nerr, nfail: number of errors and failures.
"""
err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
if err_m:
nerr = int(err_m.group(1))
nfail = 0
return nerr, nfail
fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
if fail_m:
nerr = 0
nfail = int(fail_m.group(1))
return nerr, nfail
both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
re.MULTILINE)
if both_m:
nerr = int(both_m.group(1))
nfail = int(both_m.group(2))
return nerr, nfail
# If the input didn't match any of these forms, assume no error/failures
return 0, 0
# So nose doesn't think this is a test
parse_test_output.__test__ = False
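# Illustrative check (not part of the original module): the three summary forms
# listed in the docstring above parse as expected, and unmatched text yields (0, 0).
assert parse_test_output('FAILED (errors=1)') == (1, 0)
assert parse_test_output('FAILED (failures=2)') == (0, 2)
assert parse_test_output('FAILED (errors=1, failures=2)') == (1, 2)
assert parse_test_output('OK') == (0, 0)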
def default_argv():
"""Return a valid default argv for creating testing instances of ipython"""
return ['--quick', # so no config file is loaded
# Other defaults to minimize side effects on stdout
'--colors=NoColor', '--no-term-title','--no-banner',
'--autocall=0']
def default_config():
"""Return a config object with good defaults for testing."""
config = Config()
config.TerminalInteractiveShell.colors = 'NoColor'
    config.TerminalInteractiveShell.term_title = False
config.TerminalInteractiveShell.autocall = 0
config.HistoryManager.hist_file = tempfile.mktemp(u'test_hist.sqlite')
config.HistoryManager.db_cache_size = 10000
return config
def get_ipython_cmd(as_string=False):
"""
Return appropriate IPython command line name. By default, this will return
a list that can be used with subprocess.Popen, for example, but passing
`as_string=True` allows for returning the IPython command as a string.
Parameters
----------
as_string: bool
        Flag to allow returning the command as a string.
"""
# FIXME: remove workaround for 2.6 support
if sys.version_info[:2] > (2,6):
ipython_cmd = [sys.executable, "-m", "IPython"]
else:
ipython_cmd = ["ipython"]
if as_string:
ipython_cmd = " ".join(ipython_cmd)
return ipython_cmd
def ipexec(fname, options=None):
"""Utility to call 'ipython filename'.
Starts IPython with a minimal and safe configuration to make startup as fast
as possible.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of file to be executed (should have .py or .ipy extension).
options : optional, list
Extra command-line flags to be passed to IPython.
Returns
-------
(stdout, stderr) of ipython subprocess.
"""
if options is None: options = []
# For these subprocess calls, eliminate all prompt printing so we only see
# output from script execution
prompt_opts = [ '--PromptManager.in_template=""',
'--PromptManager.in2_template=""',
'--PromptManager.out_template=""'
]
cmdargs = default_argv() + prompt_opts + options
test_dir = os.path.dirname(__file__)
ipython_cmd = get_ipython_cmd()
# Absolute path for filename
full_fname = os.path.join(test_dir, fname)
full_cmd = ipython_cmd + cmdargs + [full_fname]
p = Popen(full_cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out, err = py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
# `import readline` causes 'ESC[?1034h' to be output sometimes,
# so strip that out before doing comparisons
if out:
out = re.sub(r'\x1b\[[^h]+h', '', out)
return out, err
def ipexec_validate(fname, expected_out, expected_err='',
options=None):
"""Utility to call 'ipython filename' and validate output/error.
This function raises an AssertionError if the validation fails.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of the file to be executed (should have .py or .ipy extension).
expected_out : str
Expected stdout of the process.
expected_err : optional, str
Expected stderr of the process.
options : optional, list
Extra command-line flags to be passed to IPython.
Returns
-------
None
"""
import nose.tools as nt
out, err = ipexec(fname, options)
#print 'OUT', out # dbg
#print 'ERR', err # dbg
    # If there are any errors, we must check those before stdout, as they may be
# more informative than simply having an empty stdout.
if err:
if expected_err:
nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines()))
else:
raise ValueError('Running file %r produced error: %r' %
(fname, err))
# If no errors or output on stderr was expected, match stdout
nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines()))
class TempFileMixin(object):
"""Utility class to create temporary Python/IPython files.
Meant as a mixin class for test cases."""
def mktmp(self, src, ext='.py'):
"""Make a valid python temp file."""
fname, f = temp_pyfile(src, ext)
self.tmpfile = f
self.fname = fname
def tearDown(self):
if hasattr(self, 'tmpfile'):
# If the tmpfile wasn't made because of skipped tests, like in
# win32, there's nothing to cleanup.
self.tmpfile.close()
try:
os.unlink(self.fname)
except:
# On Windows, even though we close the file, we still can't
# delete it. I have no clue why
pass
pair_fail_msg = ("Testing {0}\n\n"
"In:\n"
" {1!r}\n"
"Expected:\n"
" {2!r}\n"
"Got:\n"
" {3!r}\n")
def check_pairs(func, pairs):
"""Utility function for the common case of checking a function with a
sequence of input/output pairs.
Parameters
----------
func : callable
The function to be tested. Should accept a single argument.
pairs : iterable
A list of (input, expected_output) tuples.
Returns
-------
None. Raises an AssertionError if any output does not match the expected
value.
"""
name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
for inp, expected in pairs:
out = func(inp)
assert out == expected, pair_fail_msg.format(name, inp, expected, out)
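# Illustrative usage (not part of the original module): check a trivial doubling
# function against a few input/output pairs; any mismatch raises AssertionError.
check_pairs(lambda x: x * 2, [(1, 2), (2, 4), ('ab', 'abab')])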
if py3compat.PY3:
MyStringIO = StringIO
else:
# In Python 2, stdout/stderr can have either bytes or unicode written to them,
# so we need a class that can handle both.
class MyStringIO(StringIO):
def write(self, s):
s = py3compat.cast_unicode(s, encoding=DEFAULT_ENCODING)
super(MyStringIO, self).write(s)
notprinted_msg = """Did not find {0!r} in printed output (on {1}):
-------
{2!s}
-------
"""
class AssertPrints(object):
"""Context manager for testing that code prints certain text.
Examples
--------
>>> with AssertPrints("abc", suppress=False):
... print "abcd"
... print "def"
...
abcd
def
"""
def __init__(self, s, channel='stdout', suppress=True):
self.s = s
self.channel = channel
self.suppress = suppress
def __enter__(self):
self.orig_stream = getattr(sys, self.channel)
self.buffer = MyStringIO()
self.tee = Tee(self.buffer, channel=self.channel)
setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
def __exit__(self, etype, value, traceback):
self.tee.flush()
setattr(sys, self.channel, self.orig_stream)
printed = self.buffer.getvalue()
assert self.s in printed, notprinted_msg.format(self.s, self.channel, printed)
return False
printed_msg = """Found {0!r} in printed output (on {1}):
-------
{2!s}
-------
"""
class AssertNotPrints(AssertPrints):
"""Context manager for checking that certain output *isn't* produced.
Counterpart of AssertPrints"""
def __exit__(self, etype, value, traceback):
self.tee.flush()
setattr(sys, self.channel, self.orig_stream)
printed = self.buffer.getvalue()
assert self.s not in printed, printed_msg.format(self.s, self.channel, printed)
return False
@contextmanager
def mute_warn():
from IPython.utils import warn
save_warn = warn.warn
warn.warn = lambda *a, **kw: None
try:
yield
finally:
warn.warn = save_warn
@contextmanager
def make_tempfile(name):
""" Create an empty, named, temporary file for the duration of the context.
"""
f = open(name, 'w')
f.close()
try:
yield
finally:
os.unlink(name)
@contextmanager
def monkeypatch(obj, name, attr):
"""
Context manager to replace attribute named `name` in `obj` with `attr`.
"""
orig = getattr(obj, name)
setattr(obj, name, attr)
yield
setattr(obj, name, orig)
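# Illustrative usage (not part of the original module): temporarily swap an
# attribute and confirm it is restored when the context exits.
class _MonkeypatchDemo(object):
    value = 1
with monkeypatch(_MonkeypatchDemo, 'value', 2):
    assert _MonkeypatchDemo.value == 2
assert _MonkeypatchDemo.value == 1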
def help_output_test(subcommand=''):
"""test that `ipython [subcommand] -h` works"""
cmd = ' '.join(get_ipython_cmd() + [subcommand, '-h'])
out, err, rc = get_output_error_code(cmd)
nt.assert_equal(rc, 0, err)
nt.assert_not_in("Traceback", err)
nt.assert_in("Options", out)
nt.assert_in("--help-all", out)
return out, err
def help_all_output_test(subcommand=''):
"""test that `ipython [subcommand] --help-all` works"""
cmd = ' '.join(get_ipython_cmd() + [subcommand, '--help-all'])
out, err, rc = get_output_error_code(cmd)
nt.assert_equal(rc, 0, err)
nt.assert_not_in("Traceback", err)
nt.assert_in("Options", out)
nt.assert_in("Class parameters", out)
return out, err
|
{
"content_hash": "d4c48448e8c2eacfeca07aad7ed8e86d",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 110,
"avg_line_length": 30.05239179954442,
"alnum_prop": 0.5977412264079436,
"repo_name": "marcoantoniooliveira/labweb",
"id": "0bd3f7ae9310c44cd6636ca9acc6c1c39febd123",
"size": "13193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/IPython/testing/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from django.db import models
from softdeletes.models import SoftDeletable
class Animal(SoftDeletable, models.Model):
CARNIVORE = 'c'
HERIVORE = 'h'
OMNIVORE = 'o'
EAT_CHOICES = (
(CARNIVORE, 'c'),
(HERIVORE, 'h'),
(OMNIVORE, 'o')
)
name = models.CharField(max_length=100)
eats = models.CharField(max_length=50, choices=EAT_CHOICES)
threatened = models.BooleanField(default=False)
class Jungle(SoftDeletable, models.Model):
name = models.CharField(max_length=100)
area = models.PositiveIntegerField()
apex_predator = models.ForeignKey(Animal)
class AnimalInJungle(SoftDeletable, models.Model):
animal = models.ForeignKey(Animal)
jungle = models.ForeignKey(Jungle)
count = models.PositiveIntegerField()
|
{
"content_hash": "3b726e2b2559874b93b61cb09f41144a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 63,
"avg_line_length": 27.17241379310345,
"alnum_prop": 0.6890862944162437,
"repo_name": "upgrad/django-deletes",
"id": "d50cd7b5459051fbbdb2ea4aee7205e138b3b11e",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testproject/testapp/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20236"
}
],
"symlink_target": ""
}
|
""" io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
    # Excel copies into clipboard with \t separation
    # inspect no more than the first 10 lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines)>1 and len(counts) == 1 and counts.pop() != 0:
kwargs['sep'] = '\t'
if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
kwargs['sep'] = '\s+'
return read_table(StringIO(text), **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
obj.to_csv(buf, sep=sep, **kwargs)
clipboard_set(buf.getvalue())
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
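# Illustrative round trip (not part of the original module). It assumes a working
# system clipboard (e.g. xclip or xsel on Linux, per the to_clipboard docstring),
# so it is shown as comments rather than executed on import:
#
#     df = DataFrame({'a': [1, 2], 'b': [3, 4]})
#     to_clipboard(df)           # tab-separated text, ready to paste into Excel
#     df2 = read_clipboard()     # parsed back into a DataFrame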
|
{
"content_hash": "b3c4e78c3b85b00b8e71110d383bbe11",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 76,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.5924669155072956,
"repo_name": "stevenzhang18/Indeed-Flask",
"id": "dfa46156aaead4bd92895fd4ca65fba63474c092",
"size": "2947",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "lib/pandas/io/clipboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "45061"
},
{
"name": "HTML",
"bytes": "1386611"
},
{
"name": "JavaScript",
"bytes": "84693"
},
{
"name": "Python",
"bytes": "10498302"
}
],
"symlink_target": ""
}
|
"""
tests.atom
~~~~~~~~~~
    Tests the atom module
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import datetime
import pytest
from werkzeug.contrib.atom import format_iso8601, AtomFeed, FeedEntry
class TestAtomFeed(object):
"""
Testcase for the `AtomFeed` class
"""
def test_atom_no_args(self):
with pytest.raises(ValueError):
AtomFeed()
def test_atom_title_no_id(self):
with pytest.raises(ValueError):
AtomFeed(title='test_title')
def test_atom_add_one(self):
a = AtomFeed(title='test_title', id=1)
f = FeedEntry(
title='test_title', id=1, updated=datetime.datetime.now())
assert len(a.entries) == 0
a.add(f)
assert len(a.entries) == 1
def test_atom_add_one_kwargs(self):
a = AtomFeed(title='test_title', id=1)
assert len(a.entries) == 0
a.add(title='test_title', id=1, updated=datetime.datetime.now())
assert len(a.entries) == 1
assert isinstance(a.entries[0], FeedEntry)
def test_atom_to_str(self):
updated_time = datetime.datetime.now()
expected_repr = '''
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">test_title</title>
<id>1</id>
<updated>%s</updated>
<generator>Werkzeug</generator>
</feed>
''' % format_iso8601(updated_time)
a = AtomFeed(title='test_title', id=1, updated=updated_time)
assert str(a).strip().replace(' ', '') == \
expected_repr.strip().replace(' ', '')
class TestFeedEntry(object):
"""
Test case for the `FeedEntry` object
"""
def test_feed_entry_no_args(self):
with pytest.raises(ValueError):
FeedEntry()
def test_feed_entry_no_id(self):
with pytest.raises(ValueError):
FeedEntry(title='test_title')
def test_feed_entry_no_updated(self):
with pytest.raises(ValueError):
FeedEntry(title='test_title', id=1)
def test_feed_entry_to_str(self):
updated_time = datetime.datetime.now()
expected_feed_entry_str = '''
<entry>
<title type="text">test_title</title>
<id>1</id>
<updated>%s</updated>
</entry>
''' % format_iso8601(updated_time)
f = FeedEntry(title='test_title', id=1, updated=updated_time)
assert str(f).strip().replace(' ', '') == \
expected_feed_entry_str.strip().replace(' ', '')
def test_format_iso8601():
# naive datetime should be treated as utc
dt = datetime.datetime(2014, 8, 31, 2, 5, 6)
assert format_iso8601(dt) == '2014-08-31T02:05:06Z'
# tz-aware datetime
dt = datetime.datetime(2014, 8, 31, 11, 5, 6, tzinfo=KST())
assert format_iso8601(dt) == '2014-08-31T11:05:06+09:00'
class KST(datetime.tzinfo):
"""KST implementation for test_format_iso8601()."""
def utcoffset(self, dt):
return datetime.timedelta(hours=9)
def tzname(self, dt):
return 'KST'
def dst(self, dt):
return datetime.timedelta(0)
|
{
"content_hash": "e7daf6e7abc882f3c151e2ff23bbde63",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 72,
"avg_line_length": 28.548672566371682,
"alnum_prop": 0.5796652200867948,
"repo_name": "pjknkda/werkzeug",
"id": "e79e6c68fd47114818a4f179f4ec6efa338c24eb",
"size": "3250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/contrib/test_atom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Makefile",
"bytes": "972"
},
{
"name": "Python",
"bytes": "1084034"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
from congress.api import base
from congress.api import error_codes
from congress.api import webservice
from congress import exception
LOG = logging.getLogger(__name__)
class LibraryPolicyModel(base.APIModel):
"""Model for handling API requests about Library Policies."""
# Note(thread-safety): blocking function
def get_items(self, params, context=None):
"""Get items in model.
:param: params: A dict-like object containing parameters
from the request query string and body.
                        The name parameter filters results by policy name.
:param: context: Key-values providing frame of reference of request
:returns: A dict containing at least a 'results' key whose value is
a list of items in the model. Additional keys set in the
dict will also be rendered for the user.
"""
include_rules = True
if params.get('include_rules', 'true').lower() == 'false':
include_rules = False
try:
# Note: name is included as a filtering parameter in get_items
# rather than a key in get_item because the API does not commit to
# library policy name being unique.
if 'name' in params:
# Note(thread-safety): blocking call
try:
policy = self.invoke_rpc(
base.LIBRARY_SERVICE_ID, 'get_policy_by_name',
{'name': params['name'],
'include_rules': include_rules})
return {"results": [policy]}
except KeyError: # not found
return {"results": []}
else:
# Note(thread-safety): blocking call
return {"results": self.invoke_rpc(
base.LIBRARY_SERVICE_ID,
'get_policies', {'include_rules': include_rules})}
except exception.CongressException as e:
raise webservice.DataModelException.create(e)
# Note(thread-safety): blocking function
def get_item(self, id_, params, context=None):
"""Retrieve item with id from model.
:param: id\_: The id of the item to retrieve
:param: params: A dict-like object containing parameters
from the request query string and body.
:param: context: Key-values providing frame of reference of request
:returns: The matching item or None if no item with id exists.
"""
try:
# Note(thread-safety): blocking call
include_rules = True
if params.get('include_rules', 'true').lower() == 'false':
include_rules = False
return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
'get_policy',
{'id_': id_,
'include_rules': include_rules})
except exception.CongressException as e:
raise webservice.DataModelException.create(e)
# Note(thread-safety): blocking function
def add_item(self, item, params, id_=None, context=None):
"""Add item to model.
:param: item: The item to add to the model
:param: params: A dict-like object containing parameters
from the request query string and body.
:param: id\_: The unique name of the item
:param: context: Key-values providing frame of reference of request
:returns: Tuple of (ID, newly_created_item)
:raises KeyError: ID already exists.
:raises DataModelException: Addition cannot be performed.
"""
if id_ is not None:
(num, desc) = error_codes.get('policy_id_must_not_be_provided')
raise webservice.DataModelException(num, desc)
try:
# Note(thread-safety): blocking call
policy_metadata = self.invoke_rpc(
base.LIBRARY_SERVICE_ID, 'create_policy',
{'policy_dict': item})
except exception.CongressException as e:
raise webservice.DataModelException.create(e)
return (policy_metadata['id'], policy_metadata)
# Note(thread-safety): blocking function
def delete_item(self, id_, params, context=None):
"""Remove item from model.
:param: id\_: The unique name of the item to be removed
:param: params:
:param: context: Key-values providing frame of reference of request
:returns: The removed item.
:raises KeyError: Item with specified id\_ not present.
"""
# Note(thread-safety): blocking call
return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
'delete_policy',
{'id_': id_})
def replace_item(self, id_, item, params, context=None):
"""Replace item with id\_ with new data.
:param: id\_: The ID of the item to be replaced
:param: item: The new item
:param: params: A dict-like object containing parameters
from the request query string and body.
:param: context: Key-values providing frame of reference of request
:returns: The new item after replacement.
:raises KeyError: Item with specified id\_ not present.
"""
# Note(thread-safety): blocking call
try:
return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
'replace_policy',
{'id_': id_,
'policy_dict': item})
except exception.CongressException as e:
raise webservice.DataModelException.create(e)
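# Illustrative call (not part of the original module), matching the filtering
# behaviour documented in get_items(); `model` is assumed to be a fully wired
# LibraryPolicyModel instance created by the API framework:
#
#     model.get_items({'name': 'some_policy', 'include_rules': 'false'})
#     # -> {"results": [<the named policy, without its rules>]} or {"results": []}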
|
{
"content_hash": "d6517be58c6ff0518ba2f42d39d5c8d9",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 40.710344827586205,
"alnum_prop": 0.5763171268846349,
"repo_name": "openstack/congress",
"id": "9f4c62aa483e97f600e4915e3e599763f299b202",
"size": "6535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/api/library_policy_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2614028"
},
{
"name": "Shell",
"bytes": "45786"
}
],
"symlink_target": ""
}
|
'''
Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar
from ansible.playbook.task_include import TaskInclude
DOCUMENTATION = '''
callback: counter_enabled
type: stdout
short_description: adds counters to the output items (tasks and hosts/task)
version_added: "2.7"
description:
        - Use this callback when you need a kind of progress bar in large environments.
        - You will know how many tasks the playbook has to run, and which one is actually running.
- You will know how many hosts may run a task, and which of them is actually running.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
'''
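# Illustrative configuration (not part of the original plugin). Per the
# requirements note in DOCUMENTATION above, the callback is enabled from
# ansible.cfg:
#
#     [defaults]
#     stdout_callback = counter_enabled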
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'counter_enabled'
_task_counter = 1
_task_total = 0
_host_counter = 1
_host_total = 0
def __init__(self):
super(CallbackModule, self).__init__()
self._playbook = ""
self._play = ""
def _all_vars(self, host=None, task=None):
# host and task need to be specified in case 'magic variables' (host vars, group vars, etc)
# need to be loaded as well
return self._play.get_variable_manager().get_vars(
play=self._play,
host=host,
task=task
)
def v2_playbook_on_start(self, playbook):
self._playbook = playbook
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"play"
else:
msg = u"PLAY [%s]" % name
        self._play = play
        self._display.banner(msg)
self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
self._task_total = len(self._play.get_tasks()[0])
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for host in hosts:
stat = stats.summarize(host)
self._display.display(u"%s : %s %s %s %s %s %s" % (
hostcolor(host, stat),
colorize(u'ok', stat['ok'], C.COLOR_OK),
colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', stat['failures'], C.COLOR_ERROR),
colorize(u'rescued', stat['rescued'], C.COLOR_OK),
colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s %s %s" % (
hostcolor(host, stat, False),
colorize(u'ok', stat['ok'], None),
colorize(u'changed', stat['changed'], None),
colorize(u'unreachable', stat['unreachable'], None),
colorize(u'failed', stat['failures'], None),
colorize(u'rescued', stat['rescued'], None),
colorize(u'ignored', stat['ignored'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats
if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom:
# fallback on constants for inherited plugins missing docs
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def v2_playbook_on_task_start(self, task, is_conditional):
args = ''
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
# machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = ', '.join(('%s=%s' % a for a in task.args.items()))
args = ' %s' % args
self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
self._host_counter = 0
self._task_counter += 1
def v2_runner_on_ok(self, result):
self._host_counter += 1
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_failed(self, result, ignore_errors=False):
self._host_counter += 1
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_ERROR)
else:
self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_skipped(self, result):
self._host_counter += 1
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
self._host_counter += 1
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
|
{
"content_hash": "f2342d6bfe01e3dabd3285df8e4f26bf",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 147,
"avg_line_length": 43.522448979591836,
"alnum_prop": 0.5496576948325987,
"repo_name": "thaim/ansible",
"id": "1c6e6697881d1c2ecd63951af1cae6ffc1a166d3",
"size": "10824",
"binary": false,
"copies": "22",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/callback/counter_enabled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
FUNCTION( 'void nom_send_initial_message( ANY context, ANY this, ANY that )', """
nom_send_message( context, this, this, that ) ;
""" )
FUNCTION( 'void nom_send_message( ANY context, ANY action, ANY this, ANY that )', """
nom_do_sync( FRAME__TASK_new( $CA(FRAME__APPLICATION_new( context, this )), action, that ) ) ;
""" )
FRAME( 'APPLICATION',
attributes = [
A( 'ANY', 'this' ),
],
methods = [
MS( ARG( CW( 'this' ) ), """
JUMP__return_ANY( CONTEXT, CONTEXT, ACTION->this ) ;
""" ),
MS( ARG( CW( 'callerContext' ) ), """
JUMP__return_ANY( CONTEXT, CONTEXT, $CA(ACTION->parent) ) ;
""" ),
],
dump = D( '%s %s', '$DUMP( object->this ), $DUMP( object->parent )' )
)
|
{
"content_hash": "b69ef325e740f38d8e633238f5e4c086",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 96,
"avg_line_length": 29.75,
"alnum_prop": 0.5588235294117647,
"repo_name": "thomasmf/nomenine",
"id": "33a8d6491c349a8a8dc3c444425c828b9a9bff2b",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/runtime/evaluation/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "442"
},
{
"name": "CSS",
"bytes": "12720"
},
{
"name": "Makefile",
"bytes": "1088"
},
{
"name": "Python",
"bytes": "178772"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('polls', '0023_auto_20150831_1711'),
]
operations = [
migrations.AddField(
model_name='pollrun',
name='pollrun_type',
field=models.CharField(default='', max_length=1, editable=False, choices=[('p', 'Propagated to sub-children'), ('r', 'Single Region'), ('u', 'Universal')]),
preserve_default=False,
),
migrations.AlterField(
model_name='pollrun',
name='conducted_on',
field=models.DateTimeField(default=django.utils.timezone.now, help_text='When the poll was conducted'),
),
migrations.AlterField(
model_name='pollrun',
name='region',
field=models.ForeignKey(blank=True, to='groups.Region', help_text='Region where the poll was conducted.', null=True),
),
migrations.AlterField(
model_name='question',
name='type',
field=models.CharField(max_length=1, choices=[('C', 'Multiple Choice'), ('K', 'Keypad'), ('M', 'Menu'), ('O', 'Open Ended'), ('N', 'Numeric'), ('R', 'Recording')]),
),
migrations.AlterField(
model_name='response',
name='status',
field=models.CharField(help_text='Current status of this response', max_length=1, verbose_name='Status', choices=[('P', 'Partial'), ('C', 'Complete'), ('E', 'Empty')]),
),
]
|
{
"content_hash": "f1322a8811ad4f5356fb02e4648b9313",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 180,
"avg_line_length": 39.65,
"alnum_prop": 0.5737704918032787,
"repo_name": "xkmato/tracpro",
"id": "4ef79b17fb25e38f1b9222068205c06f2ab1451e",
"size": "1610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tracpro/polls/migrations/0024_auto_20150907_1829.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27726"
},
{
"name": "CoffeeScript",
"bytes": "10296"
},
{
"name": "HTML",
"bytes": "107840"
},
{
"name": "JavaScript",
"bytes": "25237"
},
{
"name": "Makefile",
"bytes": "1962"
},
{
"name": "Python",
"bytes": "406848"
},
{
"name": "SaltStack",
"bytes": "19566"
},
{
"name": "Scheme",
"bytes": "29815"
},
{
"name": "Shell",
"bytes": "205447"
}
],
"symlink_target": ""
}
|
"""example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', index),
]
|
{
"content_hash": "76685153998627495c94350168ecaedf",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 33.958333333333336,
"alnum_prop": 0.694478527607362,
"repo_name": "herald-it/django-yamaps",
"id": "92f73478004bc4cba393329ef32a80b752f0cbc5",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "282"
},
{
"name": "JavaScript",
"bytes": "2818"
},
{
"name": "Python",
"bytes": "17409"
}
],
"symlink_target": ""
}
|
"""
test_mimpy
----------------------------------
Tests for `mimpy` module.
"""
from __future__ import absolute_import
import unittest
import mimpy
class TestMimpy(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e4ec9bae3dcd9289821b7fc954960599",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 38,
"avg_line_length": 13.615384615384615,
"alnum_prop": 0.5480225988700564,
"repo_name": "ohinai/mimpy",
"id": "a177ef0fbdc981288c61832fcd7a4550ab35fa75",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mimpy.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1654"
},
{
"name": "Python",
"bytes": "328582"
}
],
"symlink_target": ""
}
|
import os
import shutil
import htmlmin
import cssmin
import jsmin
from frozenweb.renderer import Renderer
from frozenweb.utils import read_text_file, read_binary_file, ensure_folder, to_bytes, write_binary_file
from frozenweb.config import FileConfig, FolderConfig
class Builder:
def __init__(self, config):
self.config = config
self.renderer = Renderer(config)
def render_file(self, config: FileConfig):
if config.jinja:
content = self.renderer.render(config.path)
if config.minimise:
content = self.minimise(content, config.extension)
return to_bytes(content)
elif config.minimise:
content = read_text_file(config.path)
content = self.minimise(content, config.extension)
return to_bytes(content)
else:
return read_binary_file(config.path)
def minimise(self, content, extension):
if self.config.args.no_minimise:
return content
if extension == '.html':
return htmlmin.minify(
content,
remove_comments=False,
remove_empty_space=True,
remove_all_empty_space=False,
remove_optional_attribute_quotes=False,
keep_pre=True
)
elif extension == '.js':
return jsmin.jsmin(content)
elif extension == '.css':
return cssmin.cssmin(content)
return content
def target_path(self, config):
relpath = os.path.relpath(config.path, self.config.site_root)
return os.path.join(self.config.build_root, relpath)
def build_file(self, config: FileConfig):
if config.skip:
return
content = self.render_file(config)
write_binary_file(self.target_path(config), content)
def build_folder(self, config: FolderConfig):
ensure_folder(self.target_path(config))
for item in config.iterate():
if isinstance(item, FileConfig):
self.build_file(item)
else:
self.build_folder(item)
def build(self):
shutil.rmtree(self.config.build_root, True)
self.build_folder(self.config.root_folder())
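# Hypothetical usage (not from the original module): `cfg` stands for the project
# configuration object this class expects, i.e. something exposing site_root,
# build_root, args and root_folder() as used above.
#
#     Builder(cfg).build()   # clears build_root, then re-renders the whole site tree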
|
{
"content_hash": "4e252794bee0cc03c03de47125f0184f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 104,
"avg_line_length": 33.76119402985075,
"alnum_prop": 0.6074270557029178,
"repo_name": "rufrozen/frozenweb",
"id": "4efcab5df9ffbe69b396764d2b7bf1d9a9360b74",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frozenweb/builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "441"
},
{
"name": "Python",
"bytes": "8052"
}
],
"symlink_target": ""
}
|
import datetime
import jsonpatch
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import exception
from ironic import objects
class PortPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return ['/address', '/node_uuid']
class Port(base.APIBase):
"""API representation of a port.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a port.
"""
_node_uuid = None
def _get_node_uuid(self):
return self._node_uuid
def _set_node_uuid(self, value):
if value and self._node_uuid != value:
try:
node = objects.Node.get_by_uuid(pecan.request.context, value)
self._node_uuid = node.uuid
# NOTE(lucasagomes): Create the node_id attribute on-the-fly
# to satisfy the api -> rpc object
# conversion.
self.node_id = node.id
except exception.NodeNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Port
e.code = 400 # BadRequest
raise e
elif value == wtypes.Unset:
self._node_uuid = wtypes.Unset
uuid = types.uuid
"Unique UUID for this port"
address = wsme.wsattr(types.macaddress, mandatory=True)
"MAC Address for this port"
extra = {wtypes.text: types.MultiType(wtypes.text, six.integer_types)}
"This port's meta data"
node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid,
mandatory=True)
"The UUID of the node this port belongs to"
links = wsme.wsattr([link.Link], readonly=True)
"A list containing a self link and associated port links"
def __init__(self, **kwargs):
self.fields = objects.Port.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
# NOTE(lucasagomes): node_uuid is not part of objects.Port.fields
# because it's an API-only attribute
self.fields.append('node_uuid')
setattr(self, 'node_uuid', kwargs.get('node_id'))
@classmethod
def convert_with_links(cls, rpc_port, expand=True):
port = Port(**rpc_port.as_dict())
if not expand:
port.unset_fields_except(['uuid', 'address'])
# never expose the node_id attribute
port.node_id = wtypes.Unset
port.links = [link.Link.make_link('self', pecan.request.host_url,
'ports', port.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ports', port.uuid,
bookmark=True)
]
return port
@classmethod
def sample(cls):
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
address='fe:54:00:77:07:d9',
extra={'foo': 'bar'},
created_at=datetime.datetime.utcnow(),
updated_at=datetime.datetime.utcnow())
        # NOTE(lucasagomes): node_uuid getter() method looks at the
# _node_uuid variable
sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
return sample
class PortCollection(collection.Collection):
"""API representation of a collection of ports."""
ports = [Port]
"A list containing ports objects"
def __init__(self, **kwargs):
self._type = 'ports'
@classmethod
def convert_with_links(cls, rpc_ports, limit, url=None,
expand=False, **kwargs):
collection = PortCollection()
collection.ports = [Port.convert_with_links(p, expand)
for p in rpc_ports]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.ports = [Port.sample()]
return sample
class PortsController(rest.RestController):
"""REST controller for Ports."""
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_nodes=False):
self._from_nodes = from_nodes
def _get_ports_collection(self, node_uuid, marker, limit, sort_key,
sort_dir, expand=False, resource_url=None):
if self._from_nodes and not node_uuid:
raise exception.InvalidParameterValue(_(
"Node id not specified."))
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.Port.get_by_uuid(pecan.request.context,
marker)
if node_uuid:
ports = pecan.request.dbapi.get_ports_by_node(node_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
ports = pecan.request.dbapi.get_port_list(limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return PortCollection.convert_with_links(ports, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def get_all(self, node_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports.
:param node_uuid: UUID of a node, to get only ports for that node.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_ports_collection(node_uuid, marker, limit,
sort_key, sort_dir)
@wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, node_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports with detail.
:param node_uuid: UUID of a node, to get only ports for that node.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "ports":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['ports', 'detail'])
return self._get_ports_collection(node_uuid, marker, limit, sort_key,
sort_dir, expand, resource_url)
@wsme_pecan.wsexpose(Port, types.uuid)
def get_one(self, port_uuid):
"""Retrieve information about the given port.
:param port_uuid: UUID of a port.
"""
if self._from_nodes:
raise exception.OperationNotPermitted
rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
return Port.convert_with_links(rpc_port)
@wsme_pecan.wsexpose(Port, body=Port, status_code=201)
def post(self, port):
"""Create a new port.
:param port: a port within the request body.
"""
if self._from_nodes:
raise exception.OperationNotPermitted
new_port = pecan.request.dbapi.create_port(port.as_dict())
return Port.convert_with_links(new_port)
@wsme.validate(types.uuid, [PortPatchType])
@wsme_pecan.wsexpose(Port, types.uuid, body=[PortPatchType])
def patch(self, port_uuid, patch):
"""Update an existing port.
:param port_uuid: UUID of a port.
:param patch: a json PATCH document to apply to this port.
"""
if self._from_nodes:
raise exception.OperationNotPermitted
rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
try:
port = Port(**jsonpatch.apply_patch(rpc_port.as_dict(),
jsonpatch.JsonPatch(patch)))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.Port.fields:
if rpc_port[field] != getattr(port, field):
rpc_port[field] = getattr(port, field)
rpc_node = objects.Node.get_by_uuid(pecan.request.context,
rpc_port.node_id)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
new_port = pecan.request.rpcapi.update_port(
pecan.request.context, rpc_port, topic)
return Port.convert_with_links(new_port)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, port_uuid):
"""Delete a port.
:param port_uuid: UUID of a port.
"""
if self._from_nodes:
raise exception.OperationNotPermitted
pecan.request.dbapi.destroy_port(port_uuid)
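# Illustrative request body (not part of the original module) for the patch()
# handler above -- a standard JSON Patch document updating a port's MAC address.
# The URL path is assumed from the v1 controller layout; the UUID and the new
# address are hypothetical:
#
#     PATCH /v1/ports/<port_uuid>
#     [{"op": "replace", "path": "/address", "value": "aa:bb:cc:dd:ee:ff"}]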
|
{
"content_hash": "2904797a57c35f2bc5bb2f4b66390d93",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 37.30960854092527,
"alnum_prop": 0.5608546356352537,
"repo_name": "JioCloud/ironic",
"id": "62025357857860263515656d468f21ad30aa7e1b",
"size": "11116",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/api/controllers/v1/port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from sitegeist.data.census.models import Tract
class TractAdmin(admin.ModelAdmin):
list_display = ('tract', 'county', 'state')
admin.site.register(Tract, TractAdmin)
|
{
"content_hash": "c7be792794fb58698461f100225c3ab7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.7696078431372549,
"repo_name": "sunlightlabs/sitegeist",
"id": "37fd7715fa513d72e51d895a1195bbaf6aaa5284",
"size": "204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitegeist/data/census/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "253485"
},
{
"name": "JavaScript",
"bytes": "1193"
},
{
"name": "Python",
"bytes": "242549"
},
{
"name": "Shell",
"bytes": "133"
}
],
"symlink_target": ""
}
|
import logging
import struct
import six
from six.moves import xrange
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
Message, OffsetAndMessage, TopicAndPartition,
BrokerMetadata, TopicMetadata, PartitionMetadata,
MetadataResponse, ProduceResponse, FetchResponse,
OffsetResponse, OffsetCommitResponse, OffsetFetchResponse,
ProtocolError, BufferUnderflowError, ChecksumError,
ConsumerFetchSizeTooSmall, UnsupportedCodecError
)
from kafka.util import (
crc32, read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition
)
log = logging.getLogger("kafka")
ATTRIBUTE_CODEC_MASK = 0x03
CODEC_NONE = 0x00
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY)
class KafkaProtocol(object):
"""
Class to encapsulate all of the protocol encoding/decoding.
This class does not have any state associated with it, it is purely
for organization.
"""
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
###################
# Private API #
###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
0, # ApiVersion
correlation_id, # CorrelationId
len(client_id), # ClientId size
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = []
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message),
encoded_message))
return b''.join(message_set)
@classmethod
def _encode_message(cls, message):
"""
Encode a single message.
The magic number of a message is a format version number.
The only supported magic number right now is zero
Format
======
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
"""
if message.magic == 0:
msg = b''.join([
struct.pack('>BB', message.magic, message.attributes),
write_int_string(message.key),
write_int_string(message.value)
])
crc = crc32(msg)
msg = struct.pack('>I%ds' % len(msg), crc, msg)
else:
raise ProtocolError("Unexpected magic number: %d" % message.magic)
return msg
@classmethod
def _decode_message_set_iter(cls, data):
"""
Iteratively decode a MessageSet
Reads repeated elements of (offset, message), calling decode_message
        to decode a single message. Since compressed messages contain further
MessageSets, these two methods have been decoupled so that they may
recurse easily.
"""
cur = 0
read_message = False
while cur < len(data):
try:
((offset, ), cur) = relative_unpack('>q', data, cur)
(msg, cur) = read_int_string(data, cur)
for (offset, message) in KafkaProtocol._decode_message(msg, offset):
read_message = True
yield OffsetAndMessage(offset, message)
except BufferUnderflowError:
# NOTE: Not sure this is correct error handling:
# Is it possible to get a BUE if the message set is somewhere
# in the middle of the fetch response? If so, we probably have
# an issue that's not fetch size too small.
# Aren't we ignoring errors if we fail to unpack data by
# just ending the generator early?
# If _decode_message() raises a ChecksumError, couldn't that
# also be due to the fetch size being too small?
if read_message is False:
# If we get a partial read of a message, but haven't
# yielded anything there's a problem
raise ConsumerFetchSizeTooSmall()
else:
# ending the generator with a plain return; raising StopIteration
# inside a generator is an error on Python 3.7+ (PEP 479)
return
@classmethod
def _decode_message(cls, data, offset):
"""
Decode a single Message
The only caller of this method is decode_message_set_iter.
They are decoupled to support nested messages (compressed MessageSets).
The offset is actually read from decode_message_set_iter (it is part
of the MessageSet payload).
"""
((crc, magic, att), cur) = relative_unpack('>IBB', data, 0)
if crc != crc32(data[4:]):
raise ChecksumError("Message checksum failed")
(key, cur) = read_int_string(data, cur)
(value, cur) = read_int_string(data, cur)
codec = att & ATTRIBUTE_CODEC_MASK
if codec == CODEC_NONE:
yield (offset, Message(magic, att, key, value))
elif codec == CODEC_GZIP:
gz = gzip_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
yield (offset, msg)
elif codec == CODEC_SNAPPY:
snp = snappy_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
yield (offset, msg)
##################
# Public API #
##################
@classmethod
def encode_produce_request(cls, client_id, correlation_id,
payloads=None, acks=1, timeout=1000):
"""
Encode some ProduceRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of ProduceRequest
acks: How many broker acknowledgements the request should wait for
0: immediate response, no acknowledgement required
1: written to disk by the leader
2+: waits for this many replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.PRODUCE_KEY))
message.append(struct.pack('>hii', acks, timeout,
len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(struct.pack('>h%dsi' % len(topic), len(topic), topic,
len(topic_payloads)))
for partition, payload in topic_payloads.items():
msg_set = KafkaProtocol._encode_message_set(payload.messages)
message.append(struct.pack('>ii%ds' % len(msg_set), partition,
len(msg_set), msg_set))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg)
@classmethod
def decode_produce_response(cls, data):
"""
Decode bytes to a ProduceResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
((strlen,), cur) = relative_unpack('>h', data, cur)
topic = data[cur:cur + strlen]
cur += strlen
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq',
data, cur)
yield ProduceResponse(topic, partition, error, offset)
@classmethod
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
max_wait_time=100, min_bytes=4096):
"""
Encodes some FetchRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of FetchRequest
max_wait_time: int, how long to block waiting on min_bytes of data
min_bytes: int, the minimum number of bytes to accumulate before
returning the response
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.FETCH_KEY))
# -1 is the replica id
message.append(struct.pack('>iiii', -1, max_wait_time, min_bytes,
len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>iqi', partition, payload.offset,
payload.max_bytes))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg)
@classmethod
def decode_fetch_response(cls, data):
"""
Decode bytes to a FetchResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, highwater_mark_offset), cur) = \
relative_unpack('>ihq', data, cur)
(message_set, cur) = read_int_string(data, cur)
yield FetchResponse(
topic, partition, error,
highwater_mark_offset,
KafkaProtocol._decode_message_set_iter(message_set))
@classmethod
def encode_offset_request(cls, client_id, correlation_id, payloads=None):
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_KEY))
# -1 is the replica id
message.append(struct.pack('>ii', -1, len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>iqi', partition, payload.time,
payload.max_offsets))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg)
@classmethod
def decode_offset_response(cls, data):
"""
Decode bytes to an OffsetResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, num_offsets,), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for j in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets))
@classmethod
def encode_metadata_request(cls, client_id, correlation_id, topics=None,
payloads=None):
"""
Encode a MetadataRequest
Arguments:
client_id: string
correlation_id: int
topics: list of strings
"""
if payloads is None:
topics = [] if topics is None else topics
else:
topics = payloads
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.METADATA_KEY))
message.append(struct.pack('>i', len(topics)))
for topic in topics:
message.append(struct.pack('>h%ds' % len(topic), len(topic), topic))
msg = b''.join(message)
return write_int_string(msg)
@classmethod
def decode_metadata_response(cls, data):
"""
Decode bytes to a MetadataResponse
Arguments:
data: bytes to decode
"""
((correlation_id, numbrokers), cur) = relative_unpack('>ii', data, 0)
# Broker info
brokers = []
for i in range(numbrokers):
((nodeId, ), cur) = relative_unpack('>i', data, cur)
(host, cur) = read_short_string(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
brokers.append(BrokerMetadata(nodeId, host, port))
# Topic info
((num_topics,), cur) = relative_unpack('>i', data, cur)
topic_metadata = []
for i in range(num_topics):
((topic_error,), cur) = relative_unpack('>h', data, cur)
(topic_name, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
partition_metadata = []
for j in range(num_partitions):
((partition_error_code, partition, leader, numReplicas), cur) = \
relative_unpack('>hiii', data, cur)
(replicas, cur) = relative_unpack(
'>%di' % numReplicas, data, cur)
((num_isr,), cur) = relative_unpack('>i', data, cur)
(isr, cur) = relative_unpack('>%di' % num_isr, data, cur)
partition_metadata.append(
PartitionMetadata(topic_name, partition, leader,
replicas, isr, partition_error_code)
)
topic_metadata.append(
TopicMetadata(topic_name, topic_error, partition_metadata)
)
return MetadataResponse(brokers, topic_metadata)
@classmethod
def encode_offset_commit_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetCommitRequest structs
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_COMMIT_KEY))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>iq', partition, payload.offset))
message.append(write_short_string(payload.metadata))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg)
@classmethod
def decode_offset_commit_response(cls, data):
"""
Decode bytes to an OffsetCommitResponse
Arguments:
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_partitions):
((partition, error), cur) = relative_unpack('>ih', data, cur)
yield OffsetCommitResponse(topic, partition, error)
@classmethod
def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetFetchRequest structs
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_FETCH_KEY))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>i', partition))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg)
@classmethod
def decode_offset_fetch_response(cls, data):
"""
Decode bytes to an OffsetFetchResponse
Arguments:
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, offset), cur) = relative_unpack('>iq', data, cur)
(metadata, cur) = read_short_string(data, cur)
((error,), cur) = relative_unpack('>h', data, cur)
yield OffsetFetchResponse(topic, partition, offset,
metadata, error)
def create_message(payload, key=None):
"""
Construct a Message
Arguments:
payload: bytes, the payload to send to Kafka
key: bytes, a key used for partition routing (optional)
"""
return Message(0, 0, key, payload)
def create_gzip_message(payloads, key=None):
"""
Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list of (payload, key) tuples to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
gzipped = gzip_encode(message_set)
codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
return Message(0, 0x00 | codec, key, gzipped)
def create_snappy_message(payloads, key=None):
"""
Construct a Snappy Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list of (payload, key) tuples to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
snapped = snappy_encode(message_set)
codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
return Message(0, 0x00 | codec, key, snapped)
def create_message_set(messages, codec=CODEC_NONE, key=None):
"""Create a message set using the given codec.
If codec is CODEC_NONE, return a list of raw Kafka messages. Otherwise,
return a list containing a single codec-encoded message.
"""
if codec == CODEC_NONE:
return [create_message(m, k) for m, k in messages]
elif codec == CODEC_GZIP:
return [create_gzip_message(messages, key)]
elif codec == CODEC_SNAPPY:
return [create_snappy_message(messages, key)]
else:
raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
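# --- Hedged usage sketch (not part of the original kafka/protocol.py) ---
# It shows how the helpers above are typically combined: build a message set,
# wrap it in a ProduceRequest, encode the request, then round-trip a single
# uncompressed message through the private encode/decode helpers. The
# ProduceRequest import and its (topic, partition, messages) field order are
# assumptions based on kafka.common in this era of the library.
if __name__ == "__main__":
    from kafka.common import ProduceRequest

    # two plain (payload, key) messages for one topic/partition
    msgs = create_message_set([(b"hello", None), (b"world", None)],
                              codec=CODEC_NONE)
    request = ProduceRequest(b"test-topic", 0, msgs)
    encoded = KafkaProtocol.encode_produce_request(
        client_id=b"example-client", correlation_id=1,
        payloads=[request], acks=1, timeout=1000)
    print("encoded produce request: %d bytes" % len(encoded))

    # round-trip a single message: encode, then decode and inspect key/value
    raw = KafkaProtocol._encode_message(create_message(b"payload", b"key"))
    for offset, message in KafkaProtocol._decode_message(raw, offset=0):
        print(offset, message.key, message.value)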
|
{
"content_hash": "e16ecc858e3346939d3c39fd55d97dfc",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 84,
"avg_line_length": 36.067880794701985,
"alnum_prop": 0.5592380078035345,
"repo_name": "docker-hub/kafka-python",
"id": "b34a95d1d2d37f8822e014ff44b62047b1283cc5",
"size": "21785",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kafka/protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "289693"
},
{
"name": "Shell",
"bytes": "2639"
}
],
"symlink_target": ""
}
|
"""
WSGI config for duckiehunt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'duckiehunt.settings')
application = get_wsgi_application()
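# Hedged usage note (not part of the original wsgi.py): a WSGI server can point
# at the module-level ``application`` above, for example with gunicorn:
#   gunicorn duckiehunt.wsgi:application
# The exact server and command line used to deploy this project are assumptions.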
|
{
"content_hash": "baca313d452d5df8f8e3a5e397c5ab9a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.8125,
"alnum_prop": 0.7732997481108312,
"repo_name": "lastcoolnameleft/duckiehunt",
"id": "a2506ea92105641bdd662303d68ab91ca4b3c17b",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/duckiehunt/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3346"
},
{
"name": "Dockerfile",
"bytes": "416"
},
{
"name": "HTML",
"bytes": "138998"
},
{
"name": "Mustache",
"bytes": "516"
},
{
"name": "Python",
"bytes": "27016"
},
{
"name": "Shell",
"bytes": "432"
}
],
"symlink_target": ""
}
|
import re
from typing import Pattern
import requests_mock
from tests.fixture_data import (
EVENT_RESPONSE_DATA,
EXECUTION_DATA,
OUTPUT_DATUM_DATA,
OUTPUT_DATUM_DOWNLOAD_RESPONSE_DATA,
OUTPUT_DATUM_RESPONSE_DATA,
PROJECT_DATA,
)
API_PREFIX = 'https://app.valohai.com/api/v0/'
def get_startswith_re(text: str) -> Pattern:
return re.compile(f'^{re.escape(text)}')
def get_execution_data_mock():
exec_id = EXECUTION_DATA['id']
datum_id = OUTPUT_DATUM_DATA['id']
m = requests_mock.mock()
project_id = PROJECT_DATA['id']
execution_counter = EXECUTION_DATA['counter']
m.get(f'{API_PREFIX}projects/{project_id}/', json=PROJECT_DATA)
m.get(f'{API_PREFIX}executions/', json={'results': [EXECUTION_DATA]})
m.get(f'{API_PREFIX}executions/{exec_id}/', json=EXECUTION_DATA)
m.get(f'{API_PREFIX}executions/{exec_id}/events/', json=EVENT_RESPONSE_DATA)
m.get(f'{API_PREFIX}data/?output_execution={exec_id}&limit=9000', json=OUTPUT_DATUM_RESPONSE_DATA)
m.get(f'{API_PREFIX}data/{datum_id}/download/', json=OUTPUT_DATUM_DOWNLOAD_RESPONSE_DATA)
execution_by_counter_url = f'{API_PREFIX}executions/{project_id}:{execution_counter}/'
m.get(url=get_startswith_re(execution_by_counter_url), json=EXECUTION_DATA)
m.delete(execution_by_counter_url, json={'ok': True})
m.post(re.compile('^https://app.valohai.com/api/v0/data/(.+?)/purge/$'), json={'ok': True})
return m
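# Hedged usage sketch (not part of the original test helper): the object returned
# by get_execution_data_mock() is a requests_mock mocker, so a test would normally
# activate it as a context manager before exercising code that talks to the API.
# The bare `requests` call below only illustrates one of the registered routes.
if __name__ == "__main__":
    import requests

    with get_execution_data_mock():
        project_id = PROJECT_DATA['id']
        response = requests.get(f'{API_PREFIX}projects/{project_id}/')
        assert response.json() == PROJECT_DATA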
|
{
"content_hash": "ce995b1a74c4e86281d55f41b20c7bfc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 102,
"avg_line_length": 37.973684210526315,
"alnum_prop": 0.6846846846846847,
"repo_name": "valohai/valohai-cli",
"id": "1dff86e5948557b98da43b45a528b5477a8de68e",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/commands/execution/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "411"
},
{
"name": "Python",
"bytes": "279031"
}
],
"symlink_target": ""
}
|
"""
Buffer editing for both ST2 and ST3 that 'just works'.
Copyright, SublimeXiki project <https://github.com/lunixbochs/SublimeXiki>
"""
import inspect
import sublime
import sublime_plugin
try:
sublime.edit_storage
except AttributeError:
sublime.edit_storage = {}
def run_callback(func, *args, **kwargs):
spec = inspect.getfullargspec(func)
if spec.args or spec.varargs:
func(*args, **kwargs)
else:
func()
class EditFuture:
def __init__(self, func):
self.func = func
def resolve(self, view, edit):
return self.func(view, edit)
class EditStep:
def __init__(self, cmd, *args):
self.cmd = cmd
self.args = args
def run(self, view, edit):
if self.cmd == 'callback':
return run_callback(self.args[0], view, edit)
if self.cmd == 'sel':
# 'sel' steps are queued by Edit.sel() but were never applied; set the selection here
start, end = self.resolve_args(view, edit)
view.sel().clear()
view.sel().add(sublime.Region(start, end))
return
funcs = {
'insert': view.insert,
'erase': view.erase,
'replace': view.replace,
}
func = funcs.get(self.cmd)
if func:
args = self.resolve_args(view, edit)
func(edit, *args)
def resolve_args(self, view, edit):
args = []
for arg in self.args:
if isinstance(arg, EditFuture):
arg = arg.resolve(view, edit)
args.append(arg)
return args
class Edit:
def __init__(self, view):
self.view = view
self.steps = []
def __nonzero__(self):
return bool(self.steps)
# Python 3 (ST3) uses __bool__ for truthiness, not __nonzero__
__bool__ = __nonzero__
@classmethod
def future(cls, func):
return EditFuture(func)
def step(self, cmd, *args):
step = EditStep(cmd, *args)
self.steps.append(step)
def insert(self, point, string):
self.step('insert', point, string)
def erase(self, region):
self.step('erase', region)
def replace(self, region, string):
self.step('replace', region, string)
def sel(self, start, end=None):
if end is None:
end = start
self.step('sel', start, end)
def callback(self, func):
self.step('callback', func)
def run(self, view, edit):
for step in self.steps:
step.run(view, edit)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
view = self.view
if sublime.version().startswith('2'):
edit = view.begin_edit()
self.run(view, edit)
view.end_edit(edit)
else:
key = str(hash(tuple(self.steps)))
sublime.edit_storage[key] = self.run
view.run_command('apply_edit', {'key': key})
class apply_edit(sublime_plugin.TextCommand):
def run(self, edit, key):
sublime.edit_storage.pop(key)(self.view, edit)
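# Hedged usage sketch (not part of the original edit.py): inside Sublime Text a
# TextCommand would typically batch its buffer changes through Edit so the same
# code works on both ST2 and ST3. The command name and inserted text below are
# purely illustrative.
class example_insert_header(sublime_plugin.TextCommand):
    def run(self, edit):
        with Edit(self.view) as batch:
            # queue an insert; it is applied when the `with` block exits
            batch.insert(0, "# header inserted by example_insert_header\n")
            # callbacks receive (view, edit) once the edit is live
            batch.callback(lambda view, edit: view.show(0))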
|
{
"content_hash": "6d8f699cc6ad82f73f417fab980e2266",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 74,
"avg_line_length": 24.8,
"alnum_prop": 0.5619501466275659,
"repo_name": "teedoo/dotfiles",
"id": "436395d9204ee68ace03a9c6d213a692855d1659",
"size": "2728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".sublime/Packages/CTags/helpers/edit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17998"
},
{
"name": "Java",
"bytes": "77"
},
{
"name": "JavaScript",
"bytes": "14353"
},
{
"name": "Python",
"bytes": "1690685"
},
{
"name": "Shell",
"bytes": "30064"
},
{
"name": "Vim script",
"bytes": "49588"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import imageio
import imgaug as ia
import imgaug.augmenters as iaa
def main():
aug = iaa.BlendAlphaMask(
iaa.SomeColorsMaskGen(),
iaa.OneOf([
iaa.TotalDropout(1.0),
iaa.AveragePooling(8)
])
)
aug2 = iaa.BlendAlphaSomeColors(iaa.OneOf([
iaa.TotalDropout(1.0),
iaa.AveragePooling(8)
]))
urls = [
("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/"
"Sarcophilus_harrisii_taranna.jpg/"
"320px-Sarcophilus_harrisii_taranna.jpg"),
("https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/"
"Vincent_van_Gogh_-_Wheatfield_with_crows_-_Google_Art_Project.jpg/"
"320px-Vincent_van_Gogh_-_Wheatfield_with_crows_-_Google_Art_Project"
".jpg"),
("https://upload.wikimedia.org/wikipedia/commons/thumb/0/0c/"
"Galerella_sanguinea_Zoo_Praha_2011-2.jpg/207px-Galerella_sanguinea_"
"Zoo_Praha_2011-2.jpg"),
("https://upload.wikimedia.org/wikipedia/commons/thumb/9/96/"
"Ambrosius_Bosschaert_the_Elder_%28Dutch_-_Flower_Still_Life_-_"
"Google_Art_Project.jpg/307px-Ambrosius_Bosschaert_the_Elder_%28"
"Dutch_-_Flower_Still_Life_-_Google_Art_Project.jpg")
]
for url in urls:
img = imageio.imread(url)
ia.imshow(ia.draw_grid(aug(images=[img]*25), cols=5, rows=5))
ia.imshow(ia.draw_grid(aug2(images=[img]*25), cols=5, rows=5))
if __name__ == "__main__":
main()
|
{
"content_hash": "dcdd610982d7d9f04f7137d6577e0a9f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 35,
"alnum_prop": 0.6190476190476191,
"repo_name": "aleju/imgaug",
"id": "78820fff5a0911baf744031276397c4d07bdaa38",
"size": "1575",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "checks/check_blendalphasomecolors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5690574"
},
{
"name": "Shell",
"bytes": "609"
}
],
"symlink_target": ""
}
|
_contracts = {}
class Contract:
@classmethod
def __init_subclass__(cls):
_contracts[cls.__name__] = cls
def __set__(self, instance, value):
self.check(value)
instance.__dict__[self.name] = value
def __set_name__(self, owner, name):
self.name = name
@classmethod
def check(cls, value):
pass
class Typed(Contract):
@classmethod
def check(cls, value):
assert isinstance(value, cls.type), f'Expected {cls.type}'
# f'a = {a}' is the f-string syntax introduced in PEP 498
super().check(value)
i = 1
class Integer(Typed):
type = int
class Float(Typed):
type = float
class String(Typed):
type = str
class Positive(Contract):
@classmethod
def check(cls, value):
assert value > 0, 'Must be > 0'
super().check(value)
class PositiveInteger(Integer, Positive):
pass
class NonEmpty(Contract):
# check that the length is greater than 0
@classmethod
def check(cls, value):
assert len(value) > 0, 'Must be nonempty'
super().check(value)
class NonEmptyString(String, NonEmpty):
# check that the value is a string with length greater than 0
pass
from functools import wraps
from inspect import signature
def checked(func):
sig = signature(func)
ann = ChainMap(
func.__annotations__,
# default to {} so the lookup below never hits a None mapping
func.__globals__.get('__annotations__', {})
)
@wraps(func)
# Why @wraps is used:
# without it, the function passed into the decorator ends up looking like an
# anonymous wrapper, which makes debugging much harder
def wrapper(*args, **kwargs):
bound = sig.bind(*args, **kwargs)
for name, val in bound.arguments.items():
if name in ann:
ann[name].check(val)
return func(*args, **kwargs)
return wrapper
from collections import ChainMap
class BaseMeta(type):
def __prepare__(cls, *args):
return ChainMap({}, _contracts)
def __new__(meta, name, bases, methods):
methods = methods.maps[0]
return super().__new__(meta, name, bases, methods)
class Base(metaclass=BaseMeta):
@classmethod
def __init_subclass__(cls):
for name, val in cls.__dict__.items():
if callable(val):
setattr(cls, name, checked(val))
for name, val in cls.__annotations__.items():
contract = val()
contract.__set_name__(cls, name)
setattr(cls, name, contract)
def __init__(self, *args):
ann = self.__annotations__
assert len(ann) == len(args), f'Expected {len(ann)} arguments'
for name, val in zip(ann, args):
setattr(self, name, val)
def __repr__(self):
args = ', '.join(repr(getattr(self, name)) for name in self.__annotations__)
return f'{type(self).__name__}({args})'
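# Hedged usage sketch (not part of the original notes): with the machinery above,
# a subclass of Base declares contracts through annotations and gets checked
# attributes, a generated __init__ and argument checking on its methods. The
# Player class and its attribute names are illustrative only.
if __name__ == '__main__':
    class Player(Base):
        name: NonEmptyString
        x: Integer
        y: Integer

        def move(self, dx: Integer, dy: Integer):
            self.x = self.x + dx
            self.y = self.y + dy

    p = Player('guido', 0, 0)
    p.move(3, 4)
    print(p)                    # Player('guido', 3, 4)
    try:
        p.move('five', 1)       # rejected: 'five' is not an int
    except AssertionError as exc:
        print('rejected:', exc)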
|
{
"content_hash": "51308276a87f1500892fbe5877c3c9eb",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 84,
"avg_line_length": 22.508196721311474,
"alnum_prop": 0.57319737800437,
"repo_name": "kjihee/lab_study_group",
"id": "5d00ec47e718da6419d3f44c51a16a5102dc6bdf",
"size": "2918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "2017/Issue/The Fun of Reinvention/contract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2511159"
},
{
"name": "Python",
"bytes": "46237"
}
],
"symlink_target": ""
}
|
"""
github.com/mikedh/trimesh
----------------------------
Library for importing, exporting and doing simple operations on triangular meshes.
"""
from . import ray
from . import util
from . import units
from . import poses
from . import graph
from . import sample
from . import repair
from . import convex
from . import remesh
from . import caching
from . import inertia
from . import boolean
from . import grouping
from . import geometry
from . import permutate
from . import proximity
from . import triangles
from . import curvature
from . import smoothing # noqa
from . import comparison
from . import registration
from . import decomposition
from . import intersections
from . import transformations
from .visual import create_visual, TextureVisuals
from .exchange.export import export_mesh
from .constants import log, log_time, tol
from .scene import Scene
from .parent import Geometry3D
import copy
import warnings
import numpy as np
class Trimesh(Geometry3D):
def __init__(self,
vertices=None,
faces=None,
face_normals=None,
vertex_normals=None,
face_colors=None,
vertex_colors=None,
face_attributes=None,
vertex_attributes=None,
metadata=None,
process=True,
validate=False,
merge_tex=None,
merge_norm=None,
use_embree=True,
initial_cache=None,
visual=None,
**kwargs):
"""
A Trimesh object contains a triangular 3D mesh.
Parameters
------------
vertices : (n, 3) float
Array of vertex locations
faces : (m, 3) or (m, 4) int
Array of triangular or quad faces (triangulated on load)
face_normals : (m, 3) float
Array of normal vectors corresponding to faces
vertex_normals : (n, 3) float
Array of normal vectors for vertices
metadata : dict
Any metadata about the mesh
process : bool
if True, NaN and Inf values will be removed
immediately and vertices will be merged
validate : bool
If True, degenerate and duplicate faces will be
removed immediately, and some functions will alter
the mesh to ensure consistent results.
use_embree : bool
If True try to use pyembree raytracer.
If pyembree is not available it will automatically fall
back to a much slower rtree/numpy implementation
initial_cache : dict
A way to pass things to the cache in case expensive
things were calculated before creating the mesh object.
visual : ColorVisuals or TextureVisuals
Assigned to self.visual
"""
if initial_cache is None:
initial_cache = {}
# self._data stores information about the mesh which
# CANNOT be regenerated.
# in the base class all that is stored here is vertex and
# face information
# any data put into the store is converted to a TrackedArray
# which is a subclass of np.ndarray that provides hash and crc
# methods which can be used to detect changes in the array.
self._data = caching.DataStore()
# self._cache stores information about the mesh which CAN be
# regenerated from self._data, but may be slow to calculate.
# In order to maintain consistency
# the cache is cleared when self._data.crc() changes
self._cache = caching.Cache(
id_function=self._data.__hash__,
force_immutable=True)
self._cache.update(initial_cache)
# check for None only to avoid warning messages in subclasses
if vertices is not None:
# (n, 3) float, set of vertices
self.vertices = vertices
if faces is not None:
# (m, 3) int of triangle faces, references self.vertices
self.faces = faces
# hold visual information about the mesh (vertex and face colors)
if visual is None:
self.visual = create_visual(
face_colors=face_colors,
vertex_colors=vertex_colors,
mesh=self)
else:
self.visual = visual
# normals are accessed through setters/properties and are regenerated
# if dimensions are inconsistent, but can be set by the constructor
# to avoid a substantial number of cross products
if face_normals is not None:
self.face_normals = face_normals
# (n, 3) float of vertex normals, can be created from face normals
if vertex_normals is not None:
self.vertex_normals = vertex_normals
# embree is a much, much faster raytracer written by Intel
# if you have pyembree installed you should use it
# although both raytracers were designed to have a common API
if ray.has_embree and use_embree:
self.ray = ray.ray_pyembree.RayMeshIntersector(self)
else:
# create a ray-mesh query object for the current mesh
# initializing is very inexpensive and the object is convenient to have.
# On first query expensive bookkeeping is done (creation of r-tree),
# and is cached for subsequent queries
self.ray = ray.ray_triangle.RayMeshIntersector(self)
# a quick way to get permuted versions of the current mesh
self.permutate = permutate.Permutator(self)
# convenience class for nearest point queries
self.nearest = proximity.ProximityQuery(self)
# store metadata about the mesh in a dictionary
self.metadata = dict()
# update the mesh metadata with passed metadata
if isinstance(metadata, dict):
self.metadata.update(metadata)
elif metadata is not None:
raise ValueError(
'metadata should be a dict or None, got %s' % str(metadata))
# Set the default center of mass and density
self._density = 1.0
self._center_mass = None
# store per-face and per-vertex attributes which will
# be updated when an update_faces call is made
self.face_attributes = {}
self.vertex_attributes = {}
# use update to copy items
if face_attributes is not None:
self.face_attributes.update(face_attributes)
if vertex_attributes is not None:
self.vertex_attributes.update(vertex_attributes)
# process will remove NaN and Inf values and merge vertices
# if validate, will remove degenerate and duplicate faces
if process or validate:
self.process(validate=validate,
merge_tex=merge_tex,
merge_norm=merge_norm)
# save reference to kwargs
self._kwargs = kwargs
def process(self,
validate=False,
merge_tex=None,
merge_norm=None):
"""
Do processing to make a mesh useful.
Does this by:
1) removing NaN and Inf values
2) merging duplicate vertices
If validate:
3) Remove triangles which have one edge
of their 2D oriented bounding box
shorter than tol.merge
4) remove duplicated triangles
5) ensure triangles are consistently wound
and normals face outwards
Parameters
------------
validate : bool
Remove degenerate and duplicate faces.
Returns
------------
self: trimesh.Trimesh
Current mesh
"""
# if there are no vertices or faces exit early
if self.is_empty:
return self
# avoid clearing the cache during operations
with self._cache:
self.remove_infinite_values()
self.merge_vertices(merge_tex=merge_tex,
merge_norm=merge_norm)
# if we're cleaning remove duplicate
# and degenerate faces
if validate:
self.remove_duplicate_faces()
self.remove_degenerate_faces()
self.fix_normals()
# since none of our process operations moved vertices or faces
# we can keep face and vertex normals in the cache without recomputing
# if faces or vertices have been removed, normals are validated before
# being returned so there is no danger of inconsistent dimensions
self._cache.clear(exclude={'face_normals',
'vertex_normals'})
self.metadata['processed'] = True
return self
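# Hedged usage note (not part of the original source): a mesh is normally built
# straight from arrays, with process=True doing the cleanup described above, e.g.
#   m = Trimesh(vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
#               faces=[[0, 1, 2]])
#   m.process(validate=True)   # additionally drops degenerate/duplicate faces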
@property
def faces(self):
"""
The faces of the mesh.
This is regarded as core information which cannot be
regenerated from cache and as such is stored in
`self._data` which tracks the array for changes and
clears cached values of the mesh if it is altered.
Returns
----------
faces : (n, 3) int64
References for `self.vertices` for triangles.
"""
return self._data.get(
'faces', np.empty(shape=(0, 3), dtype=np.int64))
@faces.setter
def faces(self, values):
"""
Set the vertex indexes that make up triangular faces.
Parameters
--------------
values : (n, 3) int64
Indexes of self.vertices
"""
if values is None or len(values) == 0:
return self._data.data.pop('faces', None)
if not (isinstance(values, np.ndarray) and values.dtype == np.int64):
values = np.asanyarray(values, dtype=np.int64)
# automatically triangulate quad faces
if len(values.shape) == 2 and values.shape[1] != 3:
log.info('triangulating faces')
values = geometry.triangulate_quads(values)
self._data['faces'] = values
@caching.cache_decorator
def faces_sparse(self):
"""
A sparse matrix representation of the faces.
Returns
----------
sparse : scipy.sparse.coo_matrix
Has properties:
dtype : bool
shape : (len(self.vertices), len(self.faces))
"""
sparse = geometry.index_sparse(
columns=len(self.vertices),
indices=self.faces)
return sparse
@property
def face_normals(self):
"""
Return the unit normal vector for each face.
If a face is degenerate and a normal can't be generated
a zero magnitude unit vector will be returned for that face.
Returns
-----------
normals : (len(self.faces), 3) np.float64
Normal vectors of each face
"""
# check shape of cached normals
cached = self._cache['face_normals']
# get faces from datastore
if 'faces' in self._data:
faces = self._data.data['faces']
else:
faces = None
# if we have no faces exit early
if faces is None or len(faces) == 0:
return np.array([], dtype=np.int64).reshape((0, 3))
# if the shape of cached normals equals the shape of faces return
if np.shape(cached) == np.shape(faces):
return cached
log.debug('generating face normals')
# use cached triangle cross products to generate normals
# this will always return the correct shape but some values
# will be zero or an arbitrary vector if the inputs had
# a cross product below machine epsilon
normals, valid = triangles.normals(
triangles=self.triangles,
crosses=self.triangles_cross)
# if all triangles are valid shape is correct
if valid.all():
# put calculated face normals into cache manually
self._cache['face_normals'] = normals
return normals
# make a padded list of normals for correct shape
padded = np.zeros((len(self.triangles), 3),
dtype=np.float64)
padded[valid] = normals
# put calculated face normals into cache manually
self._cache['face_normals'] = padded
return padded
@face_normals.setter
def face_normals(self, values):
"""
Assign values to face normals.
Parameters
-------------
values : (len(self.faces), 3) float
Unit face normals
"""
# if nothing passed exit
if values is None:
return
# make sure candidate face normals are C-contiguous float
values = np.asanyarray(
values, order='C', dtype=np.float64)
# face normals need to correspond to faces
if len(values) == 0 or values.shape != self.faces.shape:
log.debug('face_normals incorrect shape, ignoring!')
return
# check if any values are larger than tol.merge
# don't set the normals if they are all zero
ptp = values.ptp()
if not np.isfinite(ptp):
log.debug('face_normals contain NaN, ignoring!')
return
if ptp < tol.merge:
log.debug('face_normals all zero, ignoring!')
return
# make sure the first few normals match the first few triangles
check, valid = triangles.normals(
self.vertices.view(np.ndarray)[self.faces[:20]])
compare = np.zeros((len(valid), 3))
compare[valid] = check
if not np.allclose(compare, values[:20]):
log.debug("face_normals didn't match triangles, ignoring!")
return
# otherwise store face normals
self._cache['face_normals'] = values
@property
def vertices(self):
"""
The vertices of the mesh.
This is regarded as core information which cannot be
generated from cache and as such is stored in self._data
which tracks the array for changes and clears cached
values of the mesh if this is altered.
Returns
----------
vertices : (n, 3) float
Points in cartesian space referenced by self.faces
"""
return self._data.get('vertices', np.empty(shape=(0, 3), dtype=np.float64))
@vertices.setter
def vertices(self, values):
"""
Assign vertex values to the mesh.
Parameters
--------------
values : (n, 3) float
Points in space
"""
self._data['vertices'] = np.asanyarray(
values, order='C', dtype=np.float64)
@caching.cache_decorator
def vertex_normals(self):
"""
The vertex normals of the mesh. If the normals were loaded
we check to make sure we have the same number of vertex
normals and vertices before returning them. If there are
no vertex normals defined or a shape mismatch we calculate
the vertex normals from the mean normals of the faces the
vertex is used in.
Returns
----------
vertex_normals : (n, 3) float
Represents the surface normal at each vertex.
Where n == len(self.vertices)
"""
# make sure we have faces_sparse
assert hasattr(self.faces_sparse, 'dot')
vertex_normals = geometry.weighted_vertex_normals(
vertex_count=len(self.vertices),
faces=self.faces,
face_normals=self.face_normals,
face_angles=self.face_angles)
return vertex_normals
@vertex_normals.setter
def vertex_normals(self, values):
"""
Assign values to vertex normals.
Parameters
-------------
values : (len(self.vertices), 3) float
Unit normal vectors for each vertex
"""
if values is not None:
values = np.asanyarray(values,
order='C',
dtype=np.float64)
if values.shape == self.vertices.shape:
# check to see if they assigned all zeros
if values.ptp() < tol.merge:
log.debug('vertex_normals are all zero!')
self._cache['vertex_normals'] = values
@caching.cache_decorator
def vertex_faces(self):
"""
A representation of the face indices that correspond to each vertex.
Returns
----------
vertex_faces : (n,m) int
Each row contains the face indices that correspond to the given vertex,
padded with -1 up to the max number of faces corresponding to any one vertex
Where n == len(self.vertices), m == max number of faces for a single vertex
"""
vertex_faces = geometry.vertex_face_indices(
vertex_count=len(self.vertices),
faces=self.faces,
faces_sparse=self.faces_sparse)
return vertex_faces
@caching.cache_decorator
def bounds(self):
"""
The axis aligned bounds of the faces of the mesh.
Returns
-----------
bounds : (2, 3) float or None
Bounding box with [min, max] coordinates
If mesh is empty will return None
"""
# return bounds including ONLY referenced vertices
in_mesh = self.vertices[self.referenced_vertices]
# don't crash if we have no vertices referenced
if len(in_mesh) == 0:
return None
# get mesh bounds with min and max
return np.array([in_mesh.min(axis=0),
in_mesh.max(axis=0)])
@caching.cache_decorator
def extents(self):
"""
The length, width, and height of the axis aligned
bounding box of the mesh.
Returns
-----------
extents : (3, ) float or None
Array containing axis aligned [length, width, height]
If mesh is empty returns None
"""
# if mesh is empty return None
if self.bounds is None:
return None
extents = self.bounds.ptp(axis=0)
return extents
@caching.cache_decorator
def scale(self):
"""
A metric for the overall scale of the mesh, the length of the
diagonal of the axis aligned bounding box of the mesh.
Returns
----------
scale : float
The length of the mesh's AABB diagonal
"""
# if mesh is empty just return no scale
if self.extents is None:
return 1.0
# make sure we are returning python floats
scale = float((self.extents ** 2).sum() ** .5)
return scale
@caching.cache_decorator
def centroid(self):
"""
The point in space which is the average of the triangle
centroids weighted by the area of each triangle.
This will be valid even for non-watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3, ) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
try:
centroid = np.average(self.triangles_center,
weights=self.area_faces,
axis=0)
except BaseException:
# if all triangles are zero-area weights will not work
centroid = self.triangles_center.mean(axis=0)
return centroid
@property
def center_mass(self):
"""
The point in space which is the center of mass/volume.
If the current mesh is not watertight this is meaningless
garbage unless it was explicitly set.
Returns
-----------
center_mass : (3, ) float
Volumetric center of mass of the mesh
"""
center_mass = self.mass_properties['center_mass']
return center_mass
@center_mass.setter
def center_mass(self, cm):
self._center_mass = cm
self._cache.delete('mass_properties')
@property
def density(self):
"""
The density of the mesh.
Returns
-----------
density : float
The density of the mesh.
"""
density = self.mass_properties['density']
return density
@density.setter
def density(self, value):
"""
Set the density of the mesh.
Parameters
-------------
density : float
Specify the density of the mesh to be
used in inertia calculations.
"""
self._density = float(value)
self._cache.delete('mass_properties')
@property
def volume(self):
"""
Volume of the current mesh calculated using a surface
integral. If the current mesh isn't watertight this is
garbage.
Returns
---------
volume : float
Volume of the current mesh
"""
volume = self.mass_properties['volume']
return volume
@property
def mass(self):
"""
Mass of the current mesh, based on specified density and
volume. If the current mesh isn't watertight this is garbage.
Returns
---------
mass : float
Mass of the current mesh
"""
mass = self.mass_properties['mass']
return mass
@property
def moment_inertia(self):
"""
Return the moment of inertia matrix of the current mesh.
If mesh isn't watertight this is garbage.
Returns
---------
inertia : (3, 3) float
Moment of inertia of the current mesh
"""
inertia = self.mass_properties['inertia']
return inertia
@caching.cache_decorator
def principal_inertia_components(self):
"""
Return the principal components of inertia
Ordering corresponds to mesh.principal_inertia_vectors
Returns
----------
components : (3, ) float
Principal components of inertia
"""
# both components and vectors from inertia matrix
components, vectors = inertia.principal_axis(self.moment_inertia)
# store vectors in cache for later
self._cache['principal_inertia_vectors'] = vectors
return components
@property
def principal_inertia_vectors(self):
"""
Return the principal axis of inertia as unit vectors.
The order corresponds to `mesh.principal_inertia_components`.
Returns
----------
vectors : (3, 3) float
Three vectors pointing along the
principal axis of inertia directions
"""
_ = self.principal_inertia_components
return self._cache['principal_inertia_vectors']
@caching.cache_decorator
def principal_inertia_transform(self):
"""
A transform which moves the current mesh so the principal
inertia vectors are on the X,Y, and Z axis, and the centroid is
at the origin.
Returns
----------
transform : (4, 4) float
Homogeneous transformation matrix
"""
order = np.argsort(self.principal_inertia_components)[1:][::-1]
vectors = self.principal_inertia_vectors[order]
vectors = np.vstack((vectors, np.cross(*vectors)))
transform = np.eye(4)
transform[:3, :3] = vectors
transform = transformations.transform_around(
matrix=transform,
point=self.centroid)
transform[:3, 3] -= self.centroid
return transform
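# Hedged usage note (not part of the original source): the transform above is
# meant to be applied back onto the mesh, e.g.
#   mesh.apply_transform(mesh.principal_inertia_transform)
# which moves the centroid to the origin and aligns the principal inertia axes
# with XYZ. apply_transform is assumed from the wider Trimesh API; it is not
# shown in this excerpt.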
@caching.cache_decorator
def symmetry(self):
"""
Check whether a mesh has rotational symmetry around
an axis (radial) or point (spherical).
Returns
-----------
symmetry : None, 'radial', 'spherical'
What kind of symmetry does the mesh have.
"""
symmetry, axis, section = inertia.radial_symmetry(self)
self._cache['symmetry_axis'] = axis
self._cache['symmetry_section'] = section
return symmetry
@property
def symmetry_axis(self):
"""
If a mesh has rotational symmetry, return the axis.
Returns
------------
axis : (3, ) float
Axis around which a 2D profile was revolved to create this mesh.
"""
if self.symmetry is not None:
return self._cache['symmetry_axis']
@property
def symmetry_section(self):
"""
If a mesh has rotational symmetry return the two
vectors which make up a section coordinate frame.
Returns
----------
section : (2, 3) float
Vectors to take a section along
"""
if self.symmetry is not None:
return self._cache['symmetry_section']
@caching.cache_decorator
def triangles(self):
"""
Actual triangles of the mesh (points, not indexes)
Returns
---------
triangles : (n, 3, 3) float
Points of triangle vertices
"""
# use of advanced indexing on our tracked arrays will
# trigger a change flag which means the hash will have to be
# recomputed. We can escape this check by viewing the array.
triangles = self.vertices.view(np.ndarray)[self.faces]
return triangles
@caching.cache_decorator
def triangles_tree(self):
"""
An R-tree containing each face of the mesh.
Returns
----------
tree : rtree.index
Each triangle in self.faces has a rectangular cell
"""
tree = triangles.bounds_tree(self.triangles)
return tree
@caching.cache_decorator
def triangles_center(self):
"""
The center of each triangle (barycentric [1/3, 1/3, 1/3])
Returns
---------
triangles_center : (len(self.faces), 3) float
Center of each triangular face
"""
triangles_center = self.triangles.mean(axis=1)
return triangles_center
@caching.cache_decorator
def triangles_cross(self):
"""
The cross product of two edges of each triangle.
Returns
---------
crosses : (n, 3) float
Cross product of each triangle
"""
crosses = triangles.cross(self.triangles)
return crosses
@caching.cache_decorator
def edges(self):
"""
Edges of the mesh (derived from faces).
Returns
---------
edges : (n, 2) int
List of vertex indices making up edges
"""
edges, index = geometry.faces_to_edges(self.faces.view(np.ndarray),
return_index=True)
self._cache['edges_face'] = index
return edges
@caching.cache_decorator
def edges_face(self):
"""
Which face does each edge belong to.
Returns
---------
edges_face : (n, ) int
Index of self.faces
"""
_ = self.edges
return self._cache['edges_face']
@caching.cache_decorator
def edges_unique(self):
"""
The unique edges of the mesh.
Returns
----------
edges_unique : (n, 2) int
Vertex indices for unique edges
"""
unique, inverse = grouping.unique_rows(self.edges_sorted)
edges_unique = self.edges_sorted[unique]
# edges_unique will be added automatically by the decorator
# additional terms generated need to be added to the cache manually
self._cache['edges_unique_idx'] = unique
self._cache['edges_unique_inverse'] = inverse
return edges_unique
@caching.cache_decorator
def edges_unique_length(self):
"""
How long is each unique edge.
Returns
----------
length : (len(self.edges_unique), ) float
Length of each unique edge
"""
vector = np.subtract(*self.vertices[self.edges_unique.T])
length = util.row_norm(vector)
return length
@caching.cache_decorator
def edges_unique_inverse(self):
"""
Return the inverse required to reproduce
self.edges_sorted from self.edges_unique.
Useful for referencing edge properties:
mesh.edges_unique[mesh.edges_unique_inverse] == mesh.edges_sorted
Returns
----------
inverse : (len(self.edges), ) int
Indexes of self.edges_unique
"""
_ = self.edges_unique
return self._cache['edges_unique_inverse']
@caching.cache_decorator
def edges_sorted(self):
"""
Edges sorted along axis 1
Returns
----------
edges_sorted : (n, 2)
Same as self.edges but sorted along axis 1
"""
edges_sorted = np.sort(self.edges, axis=1)
return edges_sorted
@caching.cache_decorator
def edges_sorted_tree(self):
"""
A KDTree for mapping edges back to edge index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with edges will return
their index in mesh.edges_sorted
"""
from scipy.spatial import cKDTree
return cKDTree(self.edges_sorted)
@caching.cache_decorator
def edges_sparse(self):
"""
Edges in sparse bool COO graph format where connected
vertices are True.
Returns
----------
sparse: (len(self.vertices), len(self.vertices)) bool
Sparse graph in COO format
"""
sparse = graph.edges_to_coo(self.edges,
count=len(self.vertices))
return sparse
@caching.cache_decorator
def body_count(self):
"""
How many connected groups of vertices exist in this mesh.
Note that this number may differ from result in mesh.split,
which is calculated from FACE rather than vertex adjacency.
Returns
-----------
count : int
Number of connected vertex groups
"""
# labels is a (len(self.vertices), ) int array of component labels
count, labels = graph.csgraph.connected_components(
self.edges_sparse,
directed=False,
return_labels=True)
self._cache['vertices_component_label'] = labels
return count
@caching.cache_decorator
def faces_unique_edges(self):
"""
For each face return which indexes in mesh.edges_unique construct
that face.
Returns
---------
faces_unique_edges : (len(self.faces), 3) int
Indexes of self.edges_unique that
construct self.faces
Examples
---------
In [0]: mesh.faces[:2]
Out[0]:
TrackedArray([[ 1, 6946, 24224],
[ 6946, 1727, 24225]])
In [1]: mesh.edges_unique[mesh.faces_unique_edges[:2]]
Out[1]:
array([[[ 1, 6946],
[ 6946, 24224],
[ 1, 24224]],
[[ 1727, 6946],
[ 1727, 24225],
[ 6946, 24225]]])
"""
# make sure we have populated unique edges
_ = self.edges_unique
# we are relying on the fact that edges are stacked in triplets
result = self._cache['edges_unique_inverse'].reshape((-1, 3))
return result
@caching.cache_decorator
def euler_number(self):
"""
Return the Euler characteristic (a topological invariant) for the mesh
In order to guarantee correctness, this should be called after
remove_unreferenced_vertices
Returns
----------
euler_number : int
Topological invariant
"""
euler = int(self.referenced_vertices.sum() -
len(self.edges_unique) +
len(self.faces))
return euler
@caching.cache_decorator
def referenced_vertices(self):
"""
Which vertices in the current mesh are referenced by a face.
Returns
-------------
referenced : (len(self.vertices), ) bool
Which vertices are referenced by a face
"""
referenced = np.zeros(len(self.vertices), dtype=bool)
referenced[self.faces] = True
return referenced
@property
def units(self):
"""
Definition of units for the mesh.
Returns
----------
units : str
Unit system mesh is in, or None if not defined
"""
if 'units' in self.metadata:
return self.metadata['units']
else:
return None
@units.setter
def units(self, value):
value = str(value).lower()
self.metadata['units'] = value
def convert_units(self, desired, guess=False):
"""
Convert the units of the mesh into a specified unit.
Parameters
------------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert?
"""
units._convert_units(self, desired, guess)
return self
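# Hedged usage note (not part of the original source): units are plain strings,
# so a typical conversion looks like
#   mesh.units = 'mm'
#   mesh.convert_units('inches')   # rescales the mesh in place and returns self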
def merge_vertices(
self,
merge_tex=None,
merge_norm=None,
digits_vertex=None,
digits_norm=None,
digits_uv=None):
"""
Removes duplicate vertices grouped by position and
optionally texture coordinate and normal.
Parameters
-------------
mesh : Trimesh object
Mesh to merge vertices on
merge_tex : bool
If True textured meshes with UV coordinates will
have vertices merged regardless of UV coordinates
merge_norm : bool
If True, meshes with vertex normals will have
vertices merged ignoring different normals
digits_vertex : None or int
Number of digits to consider for vertex position
digits_norm : int
Number of digits to consider for unit normals
digits_uv : int
Number of digits to consider for UV coordinates
"""
grouping.merge_vertices(
mesh=self,
merge_tex=merge_tex,
merge_norm=merge_norm,
digits_vertex=digits_vertex,
digits_norm=digits_norm,
digits_uv=digits_uv)
def update_vertices(self, mask, inverse=None):
"""
Update vertices with a mask.
Parameters
------------
mask : (len(self.vertices), ) bool
Array of which vertices to keep
inverse : (len(self.vertices)) int
Array to reconstruct vertex references
such as output by np.unique
"""
# if the mesh is already empty we can't remove anything
if self.is_empty:
return
# make sure mask is a numpy array
mask = np.asanyarray(mask)
if ((mask.dtype.name == 'bool' and mask.all()) or
len(mask) == 0 or self.is_empty):
# mask doesn't remove any vertices so exit early
return
# create the inverse mask if not passed
if inverse is None:
inverse = np.zeros(len(self.vertices), dtype=np.int64)
if mask.dtype.kind == 'b':
inverse[mask] = np.arange(mask.sum())
elif mask.dtype.kind == 'i':
inverse[mask] = np.arange(len(mask))
else:
inverse = None
# re-index faces from inverse
if inverse is not None and util.is_shape(self.faces, (-1, 3)):
self.faces = inverse[self.faces.reshape(-1)].reshape((-1, 3))
# update the visual object with our mask
self.visual.update_vertices(mask)
# get the normals from cache before dumping
cached_normals = self._cache['vertex_normals']
# apply to face_attributes
count = len(self.vertices)
for key, value in self.vertex_attributes.items():
try:
# covers un-len'd objects as well
if len(value) != count:
raise TypeError()
except TypeError:
continue
# apply the mask to the attribute
self.vertex_attributes[key] = value[mask]
# actually apply the mask
self.vertices = self.vertices[mask]
# if we had passed vertex normals try to save them
if util.is_shape(cached_normals, (-1, 3)):
try:
self.vertex_normals = cached_normals[mask]
except BaseException:
pass
def update_faces(self, mask):
"""
In many cases, we will want to remove specific faces.
However, there is additional bookkeeping to do this cleanly.
This function updates the set of faces with a validity mask,
as well as keeping track of normals and colors.
Parameters
------------
mask : (m, ) int or (len(self.faces), ) bool
Mask to remove faces
"""
# if the mesh is already empty we can't remove anything
if self.is_empty:
return
mask = np.asanyarray(mask)
if mask.dtype.name == 'bool' and mask.all():
# mask removes no faces so exit early
return
# try to save face normals before dumping cache
cached_normals = self._cache['face_normals']
faces = self._data['faces']
# if Trimesh has been subclassed and faces have been moved
# from data to cache, get faces from cache.
if not util.is_shape(faces, (-1, 3)):
faces = self._cache['faces']
# apply to face_attributes
count = len(self.faces)
for key, value in self.face_attributes.items():
try:
# covers un-len'd objects as well
if len(value) != count:
raise TypeError()
except TypeError:
continue
# apply the mask to the attribute
self.face_attributes[key] = value[mask]
# actually apply the mask
self.faces = faces[mask]
# apply to face colors
self.visual.update_faces(mask)
# if our normals were the correct shape apply them
if util.is_shape(cached_normals, (-1, 3)):
self.face_normals = cached_normals[mask]
def remove_infinite_values(self):
"""
Ensure that every vertex and face consists of finite numbers.
This will remove vertices or faces containing np.nan and np.inf
Alters `self.faces` and `self.vertices`
"""
if util.is_shape(self.faces, (-1, 3)):
# (len(self.faces), ) bool, mask for faces
face_mask = np.isfinite(self.faces).all(axis=1)
self.update_faces(face_mask)
if util.is_shape(self.vertices, (-1, 3)):
# (len(self.vertices), ) bool, mask for vertices
vertex_mask = np.isfinite(self.vertices).all(axis=1)
self.update_vertices(vertex_mask)
def remove_duplicate_faces(self):
"""
On the current mesh remove any faces which are duplicates.
Alters `self.faces` to remove duplicate faces
"""
unique, inverse = grouping.unique_rows(np.sort(self.faces, axis=1))
self.update_faces(unique)
def rezero(self):
"""
Translate the mesh so that all vertex coordinates are positive.
Alters `self.vertices`.
"""
self.apply_translation(self.bounds[0] * -1.0)
@log_time
def split(self, **kwargs):
"""
Returns a list of Trimesh objects, based on face connectivity.
Splits into individual components, sometimes referred to as 'bodies'
Parameters
------------
only_watertight : bool
Only return watertight meshes and discard remainder
adjacency : None or (n, 2) int
Override face adjacency with custom values
Returns
---------
meshes : (n, ) trimesh.Trimesh
Separate bodies from original mesh
"""
return graph.split(self, **kwargs)
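# Hedged usage note (not part of the original source): split returns plain
# Trimesh objects, e.g.
#   bodies = mesh.split(only_watertight=False)
#   largest = max(bodies, key=lambda b: len(b.faces)) if len(bodies) else mesh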
@caching.cache_decorator
def face_adjacency(self):
"""
Find faces that share an edge, i.e. 'adjacent' faces.
Returns
----------
adjacency : (n, 2) int
Pairs of faces which share an edge
Examples
---------
In [1]: mesh = trimesh.load('models/featuretype.STL')
In [2]: mesh.face_adjacency
Out[2]:
array([[ 0, 1],
[ 2, 3],
[ 0, 3],
...,
[1112, 949],
[3467, 3475],
[1113, 3475]])
In [3]: mesh.faces[mesh.face_adjacency[0]]
Out[3]:
TrackedArray([[ 1, 0, 408],
[1239, 0, 1]], dtype=int64)
In [4]: import networkx as nx
In [5]: graph = nx.from_edgelist(mesh.face_adjacency)
In [6]: groups = nx.connected_components(graph)
"""
adjacency, edges = graph.face_adjacency(
mesh=self, return_edges=True)
self._cache['face_adjacency_edges'] = edges
return adjacency
@caching.cache_decorator
def face_neighborhood(self):
"""
Find faces that share a vertex, i.e. 'neighboring' faces.
Returns
----------
neighborhood : (n, 2) int
Pairs of faces which share a vertex
"""
return graph.face_neighborhood(self)
@caching.cache_decorator
def face_adjacency_edges(self):
"""
Returns the edges that are shared by the adjacent faces.
Returns
--------
edges : (n, 2) int
Vertex indices which correspond to face_adjacency
"""
# this value is calculated as a byproduct of the face adjacency
_ = self.face_adjacency
return self._cache['face_adjacency_edges']
@caching.cache_decorator
def face_adjacency_edges_tree(self):
"""
        A KDTree for mapping edges back to the face adjacency index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with SORTED edges will return
their index in mesh.face_adjacency
"""
from scipy.spatial import cKDTree
return cKDTree(self.face_adjacency_edges)
@caching.cache_decorator
def face_adjacency_angles(self):
"""
Return the angle between adjacent faces
Returns
--------
adjacency_angle : (n, ) float
Angle between adjacent faces
Each value corresponds with self.face_adjacency
"""
# get pairs of unit vectors for adjacent faces
pairs = self.face_normals[self.face_adjacency]
# find the angle between the pairs of vectors
angles = geometry.vector_angle(pairs)
return angles
@caching.cache_decorator
def face_adjacency_projections(self):
"""
The projection of the non-shared vertex of a triangle onto
its adjacent face
Returns
----------
projections : (len(self.face_adjacency), ) float
Dot product of vertex
onto plane of adjacent triangle.
"""
projections = convex.adjacency_projections(self)
return projections
@caching.cache_decorator
def face_adjacency_convex(self):
"""
Return faces which are adjacent and locally convex.
        This means that given adjacent faces A and B, the one vertex
        of B that is not shared with A, projected onto the plane of A,
        has a projection that is zero or negative.
Returns
----------
are_convex : (len(self.face_adjacency), ) bool
Face pairs that are locally convex
"""
are_convex = self.face_adjacency_projections < tol.merge
return are_convex
@caching.cache_decorator
def face_adjacency_unshared(self):
"""
Return the vertex index of the two vertices not in the shared
edge between two adjacent faces
Returns
-----------
vid_unshared : (len(mesh.face_adjacency), 2) int
Indexes of mesh.vertices
"""
vid_unshared = graph.face_adjacency_unshared(self)
return vid_unshared
@caching.cache_decorator
def face_adjacency_radius(self):
"""
The approximate radius of a cylinder that fits inside adjacent faces.
Returns
------------
radii : (len(self.face_adjacency), ) float
Approximate radius formed by triangle pair
"""
radii, span = graph.face_adjacency_radius(mesh=self)
self._cache['face_adjacency_span'] = span
return radii
@caching.cache_decorator
def face_adjacency_span(self):
"""
The approximate perpendicular projection of the non-shared
vertices in a pair of adjacent faces onto the shared edge of
the two faces.
Returns
------------
span : (len(self.face_adjacency), ) float
Approximate span between the non-shared vertices
"""
_ = self.face_adjacency_radius
return self._cache['face_adjacency_span']
@caching.cache_decorator
def integral_mean_curvature(self):
"""
The integral mean curvature, or the surface integral of the mean curvature.
Returns
---------
area : float
Integral mean curvature of mesh
"""
edges_length = np.linalg.norm(np.subtract(
*self.vertices[self.face_adjacency_edges.T]), axis=1)
imc = (self.face_adjacency_angles * edges_length).sum() * 0.5
return imc
@caching.cache_decorator
def vertex_adjacency_graph(self):
"""
Returns a networkx graph representing the vertices and their connections
in the mesh.
Returns
---------
graph: networkx.Graph
          Graph where each node is a vertex index and each edge
          connects a pair of vertices that share a mesh edge
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
mesh = trimesh.primitives.Box()
graph = mesh.vertex_adjacency_graph
graph.neighbors(0)
> [1, 2, 3, 4]
"""
adjacency_g = graph.vertex_adjacency_graph(mesh=self)
return adjacency_g
@caching.cache_decorator
def vertex_neighbors(self):
"""
The vertex neighbors of each vertex of the mesh, determined from
the cached vertex_adjacency_graph, if already existent.
Returns
----------
vertex_neighbors : (len(self.vertices), ) int
Represents immediate neighbors of each vertex along
the edge of a triangle
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
>>> mesh = trimesh.primitives.Box()
>>> mesh.vertex_neighbors[0]
[1, 2, 3, 4]
"""
return graph.neighbors(
edges=self.edges_unique, max_index=len(self.vertices))
@caching.cache_decorator
def is_winding_consistent(self):
"""
Does the mesh have consistent winding or not.
A mesh with consistent winding has each shared edge
going in an opposite direction from the other in the pair.
Returns
--------
consistent : bool
          Is winding consistent or not
"""
if self.is_empty:
return False
# consistent winding check is populated into the cache by is_watertight
_ = self.is_watertight
return self._cache['is_winding_consistent']
@caching.cache_decorator
def is_watertight(self):
"""
Check if a mesh is watertight by making sure every edge is
included in two faces.
Returns
----------
is_watertight : bool
Is mesh watertight or not
"""
if self.is_empty:
return False
watertight, winding = graph.is_watertight(
edges=self.edges, edges_sorted=self.edges_sorted)
self._cache['is_winding_consistent'] = winding
return watertight
@caching.cache_decorator
def is_volume(self):
"""
Check if a mesh has all the properties required to represent
a valid volume, rather than just a surface.
These properties include being watertight, having consistent
winding and outward facing normals.
Returns
---------
valid : bool
Does the mesh represent a volume
"""
valid = bool(self.is_watertight and
self.is_winding_consistent and
np.isfinite(self.center_mass).all() and
self.volume > 0.0)
return valid
@property
def is_empty(self):
"""
Does the current mesh have data defined.
Returns
--------
empty : bool
If True, no data is set on the current mesh
"""
return self._data.is_empty()
@caching.cache_decorator
def is_convex(self):
"""
Check if a mesh is convex or not.
Returns
----------
is_convex: bool
Is mesh convex or not
"""
if self.is_empty:
return False
is_convex = bool(convex.is_convex(self))
return is_convex
@caching.cache_decorator
def kdtree(self):
"""
Return a scipy.spatial.cKDTree of the vertices of the mesh.
        Not cached as this led to observed memory issues and segfaults.
Returns
---------
tree : scipy.spatial.cKDTree
Contains mesh.vertices
"""
from scipy.spatial import cKDTree
tree = cKDTree(self.vertices.view(np.ndarray))
return tree
def remove_degenerate_faces(self, height=tol.merge):
"""
Remove degenerate faces (faces without 3 unique vertex indices)
from the current mesh.
If a height is specified, it will remove any face with a 2D oriented
bounding box with one edge shorter than that height.
If not specified, it will remove any face with a zero normal.
Parameters
------------
height : float
If specified removes faces with an oriented bounding
box shorter than this on one side.
Returns
-------------
nondegenerate : (len(self.faces), ) bool
Mask used to remove faces
"""
nondegenerate = triangles.nondegenerate(
self.triangles,
areas=self.area_faces,
height=height)
self.update_faces(nondegenerate)
return nondegenerate
@caching.cache_decorator
def facets(self):
"""
Return a list of face indices for coplanar adjacent faces.
Returns
---------
facets : (n, ) sequence of (m, ) int
Groups of indexes of self.faces
"""
facets = graph.facets(self)
return facets
@caching.cache_decorator
def facets_area(self):
"""
Return an array containing the area of each facet.
Returns
---------
area : (len(self.facets), ) float
Total area of each facet (group of faces)
"""
# avoid thrashing the cache inside a loop
area_faces = self.area_faces
# sum the area of each group of faces represented by facets
# use native python sum in tight loop as opposed to array.sum()
# as in this case the lower function call overhead of
# native sum provides roughly a 50% speedup
areas = np.array([sum(area_faces[i])
for i in self.facets],
dtype=np.float64)
return areas
@caching.cache_decorator
def facets_normal(self):
"""
Return the normal of each facet
Returns
---------
normals: (len(self.facets), 3) float
A unit normal vector for each facet
"""
if len(self.facets) == 0:
return np.array([])
area_faces = self.area_faces
# the face index of the largest face in each facet
index = np.array([i[area_faces[i].argmax()]
for i in self.facets])
# (n, 3) float, unit normal vectors of facet plane
normals = self.face_normals[index]
# (n, 3) float, points on facet plane
origins = self.vertices[self.faces[:, 0][index]]
# save origins in cache
self._cache['facets_origin'] = origins
return normals
@caching.cache_decorator
def facets_origin(self):
"""
Return a point on the facet plane.
Returns
------------
origins : (len(self.facets), 3) float
A point on each facet plane
"""
_ = self.facets_normal
return self._cache['facets_origin']
@caching.cache_decorator
def facets_boundary(self):
"""
Return the edges which represent the boundary of each facet
Returns
---------
edges_boundary : sequence of (n, 2) int
Indices of self.vertices
"""
# make each row correspond to a single face
edges = self.edges_sorted.reshape((-1, 6))
# get the edges for each facet
edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets]
edges_boundary = [i[grouping.group_rows(i, require_count=1)]
for i in edges_facet]
return edges_boundary
@caching.cache_decorator
def facets_on_hull(self):
"""
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets), ) bool
          Is a facet on the mesh's convex hull or not
"""
# if no facets exit early
if len(self.facets) == 0:
return np.array([], dtype=bool)
# facets plane, origin and normal
normals = self.facets_normal
origins = self.facets_origin
# (n, 3) convex hull vertices
convex = self.convex_hull.vertices.view(np.ndarray).copy()
# boolean mask for which facets are on convex hull
on_hull = np.zeros(len(self.facets), dtype=bool)
for i, normal, origin in zip(range(len(normals)), normals, origins):
# a facet plane is on the convex hull if every vertex
# of the convex hull is behind that plane
# which we are checking with dot products
dot = np.dot(normal, (convex - origin).T)
on_hull[i] = (dot < tol.merge).all()
return on_hull
@log_time
def fix_normals(self, multibody=None):
"""
Find and fix problems with self.face_normals and self.faces
winding direction.
For face normals ensure that vectors are consistently pointed
outwards, and that self.faces is wound in the correct direction
for all connected components.
Parameters
-------------
multibody : None or bool
Fix normals across multiple bodies
if None automatically pick from body_count
"""
if multibody is None:
multibody = self.body_count > 1
repair.fix_normals(self, multibody=multibody)
def fill_holes(self):
"""
Fill single triangle and single quad holes in the current mesh.
Returns
----------
watertight : bool
Is the mesh watertight after the function completes
"""
return repair.fill_holes(self)
def register(self, other, **kwargs):
"""
Align a mesh with another mesh or a PointCloud using
the principal axes of inertia as a starting point which
is refined by iterative closest point.
Parameters
------------
mesh : trimesh.Trimesh object
Mesh to align with other
other : trimesh.Trimesh or (n, 3) float
Mesh or points in space
samples : int
Number of samples from mesh surface to align
icp_first : int
How many ICP iterations for the 9 possible
          combinations of sign flippage
icp_final : int
          How many ICP iterations for the closest
candidate from the wider search
Returns
-----------
mesh_to_other : (4, 4) float
Transform to align mesh to the other object
cost : float
Average square distance per point
"""
mesh_to_other, cost = registration.mesh_other(
mesh=self,
other=other,
**kwargs)
return mesh_to_other, cost
def compute_stable_poses(self,
center_mass=None,
sigma=0.0,
n_samples=1,
threshold=0.0):
"""
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
        For each sample, it computes the stable resting poses of the mesh on a
        planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogeneous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
        The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
------------
center_mass : (3, ) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogeneous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n, ) float
A probability ranging from 0.0 to 1.0 for each pose
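        Examples
        ----------
        A rough usage sketch; the box primitive below just stands
        in for real geometry.
        >>> mesh = trimesh.creation.box(extents=[1, 1, 2])
        >>> transforms, probs = mesh.compute_stable_poses()
        >>> len(transforms) == len(probs)
        True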
"""
return poses.compute_stable_poses(mesh=self,
center_mass=center_mass,
sigma=sigma,
n_samples=n_samples,
threshold=threshold)
def subdivide(self, face_index=None):
"""
Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
------------
face_index: (m, ) int or None
If None all faces of mesh will be subdivided
If (m, ) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
          no longer be manifold, as the additional vertex at the edge
          midpoint will not be used by the faces adjacent to those specified,
and an additional postprocessing step will be required to
make resulting mesh watertight
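        Examples
        ----------
        A rough usage sketch on a box primitive standing in for
        real geometry; full subdivision quadruples the face count.
        >>> mesh = trimesh.creation.box()
        >>> dense = mesh.subdivide()
        >>> len(dense.faces) == 4 * len(mesh.faces)
        True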
"""
# subdivide vertex attributes
vertex_attributes = {}
visual = None
if (hasattr(self.visual, 'uv') and
np.shape(self.visual.uv) == (len(self.vertices), 2)):
# uv coords divided along with vertices
vertices, faces, attr = remesh.subdivide(
vertices=np.hstack((self.vertices, self.visual.uv)),
faces=self.faces,
face_index=face_index,
vertex_attributes=vertex_attributes)
# get a copy of the current visuals
visual = self.visual.copy()
# separate uv coords and vertices
vertices, visual.uv = vertices[:, :3], vertices[:, 3:]
else:
# perform the subdivision with vertex attributes
vertices, faces, attr = remesh.subdivide(
vertices=self.vertices,
faces=self.faces,
face_index=face_index,
vertex_attributes=vertex_attributes)
# create a new mesh
result = Trimesh(
vertices=vertices,
faces=faces,
visual=visual,
vertex_attributes=attr,
process=False)
return result
def subdivide_to_size(self, max_edge, max_iter=10, return_index=False):
"""
Subdivide a mesh until every edge is shorter than a
specified length.
Will return a triangle soup, not a nicely structured mesh.
Parameters
------------
max_edge : float
Maximum length of any edge in the result
max_iter : int
The maximum number of times to run subdivision
return_index : bool
If True, return index of original face for new faces
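        Examples
        ----------
        A rough usage sketch on a box primitive; the 0.1 edge
        length is an arbitrary choice for illustration.
        >>> mesh = trimesh.creation.box()
        >>> dense = mesh.subdivide_to_size(max_edge=0.1)
        >>> dense.edges_unique_length.max() <= 0.1
        True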
"""
# subdivide vertex attributes
visual = None
if (hasattr(self.visual, 'uv') and
np.shape(self.visual.uv) == (len(self.vertices), 2)):
# uv coords divided along with vertices
vertices_faces = remesh.subdivide_to_size(
vertices=np.hstack((self.vertices, self.visual.uv)),
faces=self.faces,
max_edge=max_edge,
max_iter=max_iter,
return_index=return_index)
# unpack result
if return_index:
vertices, faces, final_index = vertices_faces
else:
vertices, faces = vertices_faces
# get a copy of the current visuals
visual = self.visual.copy()
# separate uv coords and vertices
vertices, visual.uv = vertices[:, :3], vertices[:, 3:]
else:
# uv coords divided along with vertices
vertices_faces = remesh.subdivide_to_size(
vertices=self.vertices,
faces=self.faces,
max_edge=max_edge,
max_iter=max_iter,
return_index=return_index)
# unpack result
if return_index:
vertices, faces, final_index = vertices_faces
else:
vertices, faces = vertices_faces
# create a new mesh
result = Trimesh(
vertices=vertices,
faces=faces,
visual=visual,
process=False)
if return_index:
return result, final_index
return result
@log_time
def smoothed(self, **kwargs):
"""
Return a version of the current mesh which will render
nicely, without changing source mesh.
Parameters
-------------
angle : float or None
          Angle in radians: face pairs with angles
smaller than this will appear smoothed
facet_minarea : float or None
Minimum area fraction to consider
          IE for `facet_minarea=25` only facets larger
than `mesh.area / 25` will be considered.
Returns
---------
smoothed : trimesh.Trimesh
Non watertight version of current mesh
which will render nicely with smooth shading
"""
# smooth should be recomputed if visuals change
self.visual._verify_hash()
cached = self.visual._cache['smoothed']
if cached is not None:
return cached
# run smoothing
smoothed = graph.smoothed(
self, **kwargs)
self.visual._cache['smoothed'] = smoothed
return smoothed
@property
def visual(self):
"""
Get the stored visuals for the current mesh.
Returns
-------------
visual : ColorVisuals or TextureVisuals
Contains visual information about the mesh
"""
if hasattr(self, '_visual'):
return self._visual
return None
@visual.setter
def visual(self, value):
"""
When setting a visual object, always make sure
that `visual.mesh` points back to the source mesh.
Parameters
--------------
visual : ColorVisuals or TextureVisuals
Contains visual information about the mesh
"""
value.mesh = self
self._visual = value
def section(self,
plane_normal,
plane_origin,
**kwargs):
"""
Returns a 3D cross section of the current mesh and a plane
defined by origin and normal.
Parameters
------------
plane_normal: (3) vector for plane normal
Normal vector of section plane
plane_origin : (3, ) float
Point on the cross section plane
Returns
---------
intersections: Path3D or None
Curve of intersection
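        Examples
        ----------
        A rough usage sketch slicing a box primitive through its
        centroid; the geometry is a stand-in.
        >>> mesh = trimesh.creation.box(extents=[2, 2, 2])
        >>> path = mesh.section(plane_origin=mesh.centroid,
        ...                     plane_normal=[0, 0, 1])
        >>> len(path.entities) > 0
        True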
"""
# turn line segments into Path2D/Path3D objects
from .exchange.load import load_path
# return a single cross section in 3D
lines, face_index = intersections.mesh_plane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
return_faces=True,
**kwargs)
# if the section didn't hit the mesh return None
if len(lines) == 0:
return None
# otherwise load the line segments into a Path3D object
path = load_path(lines)
# add the face index info into metadata
path.metadata['face_index'] = face_index
return path
def section_multiplane(self,
plane_origin,
plane_normal,
heights):
"""
Return multiple parallel cross sections of the current
mesh in 2D.
Parameters
------------
plane_origin : (3, ) float
Point on the cross section plane
plane_normal: (3) vector for plane normal
Normal vector of section plane
heights : (n, ) float
Each section is offset by height along
the plane normal.
Returns
---------
paths : (n, ) Path2D or None
2D cross sections at specified heights.
path.metadata['to_3D'] contains transform
to return 2D section back into 3D space.
"""
# turn line segments into Path2D/Path3D objects
from .exchange.load import load_path
# do a multiplane intersection
lines, transforms, faces = intersections.mesh_multiplane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
heights=heights)
# turn the line segments into Path2D objects
paths = [None] * len(lines)
for i, faces, segments, T in zip(range(len(lines)),
faces,
lines,
transforms):
if len(segments) > 0:
paths[i] = load_path(
segments,
metadata={'to_3D': T, 'face_index': faces})
return paths
def slice_plane(self,
plane_origin,
plane_normal,
cap=False,
face_index=None,
cached_dots=None,
**kwargs):
"""
Slice the mesh with a plane, returning a new mesh that is the
        portion of the original mesh to the positive normal side of the plane
        Parameters
        ------------
        plane_origin : (3,) float
Point on plane to intersect with mesh
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
cap : bool
If True, cap the result with a triangulated polygon
face_index : ((m,) int)
Indexes of mesh.faces to slice. When no mask is
provided, the default is to slice all faces.
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
---------
new_mesh: trimesh.Trimesh or None
Subset of current mesh that intersects the half plane
to the positive normal side of the plane
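        Examples
        ----------
        A rough usage sketch keeping the upper half of a box
        primitive; the geometry is a stand-in.
        >>> mesh = trimesh.creation.box(extents=[2, 2, 2])
        >>> sliced = mesh.slice_plane(plane_origin=mesh.centroid,
        ...                           plane_normal=[0, 0, 1])
        >>> sliced.vertices[:, 2].min() >= -1e-8
        True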
"""
# return a new mesh
new_mesh = intersections.slice_mesh_plane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
cap=cap,
face_index=face_index,
cached_dots=cached_dots,
**kwargs)
return new_mesh
def unwrap(self, image=None):
"""
Returns a Trimesh object equivalent to the current mesh where
the vertices have been assigned uv texture coordinates. Vertices
may be split into as many as necessary by the unwrapping
algorithm, depending on how many uv maps they appear in.
Requires `pip install xatlas`
Parameters
------------
image : None or PIL.Image
Image to assign to the material
Returns
--------
unwrapped : trimesh.Trimesh
Mesh with unwrapped uv coordinates
"""
import xatlas
vmap, faces, uv = xatlas.parametrize(
self.vertices, self.faces)
result = Trimesh(vertices=self.vertices[vmap],
faces=faces,
visual=TextureVisuals(uv=uv, image=image),
process=False)
# run additional checks for unwrapping
if tol.strict:
# check the export object to make sure we didn't
# move the indices around on creation
assert np.allclose(result.visual.uv, uv)
assert np.allclose(result.faces, faces)
assert np.allclose(result.vertices, self.vertices[vmap])
# check to make sure indices are still the
# same order after we've exported to OBJ
export = result.export(file_type='obj')
uv_recon = np.array([L[3:].split() for L in
str.splitlines(export) if
L.startswith('vt ')],
dtype=np.float64)
assert np.allclose(uv_recon, uv)
v_recon = np.array([L[2:].split() for L in
str.splitlines(export) if
L.startswith('v ')],
dtype=np.float64)
assert np.allclose(v_recon, self.vertices[vmap])
return result
@caching.cache_decorator
def convex_hull(self):
"""
Returns a Trimesh object representing the convex hull of
the current mesh.
Returns
--------
convex : trimesh.Trimesh
Mesh of convex hull of current mesh
"""
hull = convex.convex_hull(self)
return hull
def sample(self, count, return_index=False, face_weight=None):
"""
Return random samples distributed across the
surface of the mesh
Parameters
------------
count : int
Number of points to sample
return_index : bool
If True will also return the index of which face each
sample was taken from.
face_weight : None or len(mesh.faces) float
Weight faces by a factor other than face area.
          If None will be the same as face_weight=mesh.area_faces
Returns
---------
samples : (count, 3) float
Points on surface of mesh
face_index : (count, ) int
Index of self.faces
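        Examples
        ----------
        A rough usage sketch; the icosphere primitive and sample
        count are arbitrary stand-ins.
        >>> mesh = trimesh.creation.icosphere()
        >>> points, face_index = mesh.sample(100, return_index=True)
        >>> points.shape
        (100, 3)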
"""
samples, index = sample.sample_surface(
mesh=self, count=count, face_weight=face_weight)
if return_index:
return samples, index
return samples
def remove_unreferenced_vertices(self):
"""
Remove all vertices in the current mesh which are not
referenced by a face.
"""
referenced = np.zeros(len(self.vertices), dtype=bool)
referenced[self.faces] = True
inverse = np.zeros(len(self.vertices), dtype=np.int64)
inverse[referenced] = np.arange(referenced.sum())
self.update_vertices(mask=referenced, inverse=inverse)
def unmerge_vertices(self):
"""
Removes all face references so that every face contains
three unique vertex indices and no faces are adjacent.
"""
# new faces are incrementing so every vertex is unique
faces = np.arange(len(self.faces) * 3,
dtype=np.int64).reshape((-1, 3))
# use update_vertices to apply mask to
# all properties that are per-vertex
self.update_vertices(self.faces.reshape(-1))
# set faces to incrementing indexes
self.faces = faces
        # keep face normals as they haven't changed
self._cache.clear(exclude=['face_normals'])
def apply_transform(self, matrix):
"""
Transform mesh by a homogeneous transformation matrix.
Does the bookkeeping to avoid recomputing things so this function
should be used rather than directly modifying self.vertices
if possible.
Parameters
------------
matrix : (4, 4) float
Homogeneous transformation matrix
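        Examples
        ----------
        A rough usage sketch rotating a box primitive 90 degrees
        about the Y axis; the geometry is a stand-in.
        >>> import numpy as np
        >>> mesh = trimesh.creation.box()
        >>> rotate = trimesh.transformations.rotation_matrix(
        ...     angle=np.pi / 2.0, direction=[0, 1, 0])
        >>> _ = mesh.apply_transform(rotate)
        >>> np.allclose(mesh.extents, [1, 1, 1])
        True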
"""
# get c-order float64 matrix
matrix = np.asanyarray(
matrix, order='C', dtype=np.float64)
# only support homogeneous transformations
if matrix.shape != (4, 4):
raise ValueError('Transformation matrix must be (4, 4)!')
# exit early if we've been passed an identity matrix
# np.allclose is surprisingly slow so do this test
elif util.allclose(matrix, np.eye(4), 1e-8):
log.debug('apply_transform passed identity matrix')
return self
# new vertex positions
new_vertices = transformations.transform_points(
self.vertices,
matrix=matrix)
# check to see if the matrix has rotation
# rather than just translation
has_rotation = not util.allclose(
matrix[:3, :3], np.eye(3), atol=1e-6)
# overridden center of mass
if self._center_mass is not None:
self._center_mass = transformations.transform_points(
np.array([self._center_mass, ]),
matrix)[0]
# preserve face normals if we have them stored
if has_rotation and 'face_normals' in self._cache:
# transform face normals by rotation component
self._cache.cache['face_normals'] = util.unitize(
transformations.transform_points(
self.face_normals,
matrix=matrix,
translate=False))
# preserve vertex normals if we have them stored
if has_rotation and 'vertex_normals' in self._cache:
self._cache.cache['vertex_normals'] = util.unitize(
transformations.transform_points(
self.vertex_normals,
matrix=matrix,
translate=False))
# if transformation flips winding of triangles
if has_rotation and transformations.flips_winding(matrix):
log.debug('transform flips winding')
# fliplr will make array non C contiguous
# which will cause hashes to be more
# expensive than necessary so wrap
self.faces = np.ascontiguousarray(
np.fliplr(self.faces))
# assign the new values
self.vertices = new_vertices
# preserve normals and topology in cache
# while dumping everything else
self._cache.clear(exclude={
'face_normals', # transformed by us
'vertex_normals', # also transformed by us
'face_adjacency', # topological
'face_adjacency_edges',
'face_adjacency_unshared',
'edges',
'edges_face',
'edges_sorted',
'edges_unique',
'edges_unique_idx',
'edges_unique_inverse',
'edges_sparse',
'body_count',
'faces_unique_edges',
'euler_number'})
# set the cache ID with the current hash value
self._cache.id_set()
log.debug('mesh transformed by matrix')
return self
def voxelized(self, pitch, method='subdivide', **kwargs):
"""
Return a VoxelGrid object representing the current mesh
discretized into voxels at the specified pitch
Parameters
------------
pitch : float
The edge length of a single voxel
method: implementation key. See `trimesh.voxel.creation.voxelizers`
**kwargs: additional kwargs passed to the specified implementation.
Returns
----------
voxelized : VoxelGrid object
Representing the current mesh
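        Examples
        ----------
        A rough usage sketch; the box primitive and 0.25 pitch
        are arbitrary stand-ins.
        >>> mesh = trimesh.creation.box(extents=[1, 1, 1])
        >>> vox = mesh.voxelized(pitch=0.25)
        >>> len(vox.points) > 0
        True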
"""
from .voxel import creation
return creation.voxelize(
mesh=self, pitch=pitch, method=method, **kwargs)
@caching.cache_decorator
def as_open3d(self):
"""
Return an `open3d.geometry.TriangleMesh` version of
the current mesh.
Returns
---------
open3d : open3d.geometry.TriangleMesh
Current mesh as an open3d object.
"""
import open3d
# create from numpy arrays
return open3d.geometry.TriangleMesh(
vertices=open3d.utility.Vector3dVector(self.vertices),
triangles=open3d.utility.Vector3iVector(self.faces))
def simplify_quadratic_decimation(self, face_count):
"""
A thin wrapper around the open3d implementation of this:
`open3d.geometry.TriangleMesh.simplify_quadric_decimation`
Parameters
-----------
face_count : int
Number of faces desired in the resulting mesh.
Returns
---------
simple : trimesh.Trimesh
Simplified version of mesh.
"""
simple = self.as_open3d.simplify_quadric_decimation(
int(face_count))
return Trimesh(vertices=simple.vertices, faces=simple.triangles)
def outline(self, face_ids=None, **kwargs):
"""
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
------------
face_ids : (n, ) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
"""
from .path import Path3D
from .path.exchange.misc import faces_to_path
return Path3D(**faces_to_path(
self, face_ids, **kwargs))
def projected(self,
normal,
**kwargs):
"""
Project a mesh onto a plane and then extract the
polygon that outlines the mesh projection on that
plane.
Parameters
----------
mesh : trimesh.Trimesh
Source geometry
check : bool
          If True make sure it is flat
normal : (3,) float
Normal to extract flat pattern along
origin : None or (3,) float
Origin of plane to project mesh onto
pad : float
Proportion to pad polygons by before unioning
and then de-padding result by to avoid zero-width gaps.
tol_dot : float
Tolerance for discarding on-edge triangles.
max_regions : int
Raise an exception if the mesh has more than this
number of disconnected regions to fail quickly before unioning.
Returns
----------
projected : trimesh.path.Path2D
Outline of source mesh
"""
from .path import Path2D
from .exchange.load import load_path
from .path.polygons import projected
projection = projected(
mesh=self, normal=normal, **kwargs)
if projection is None:
return Path2D()
return load_path(projection)
@caching.cache_decorator
def area(self):
"""
Summed area of all triangles in the current mesh.
Returns
---------
area : float
Surface area of mesh
"""
area = self.area_faces.sum()
return area
@caching.cache_decorator
def area_faces(self):
"""
The area of each face in the mesh.
Returns
---------
area_faces : (n, ) float
Area of each face
"""
area_faces = triangles.area(
crosses=self.triangles_cross,
sum=False)
return area_faces
@caching.cache_decorator
def mass_properties(self):
"""
Returns the mass properties of the current mesh.
Assumes uniform density, and result is probably garbage if mesh
isn't watertight.
Returns
----------
properties : dict
With keys:
'volume' : in global units^3
'mass' : From specified density
'density' : Included again for convenience (same as kwarg density)
'inertia' : Taken at the center of mass and aligned with global
coordinate system
'center_mass' : Center of mass location, in global coordinate system
"""
mass = triangles.mass_properties(
triangles=self.triangles,
crosses=self.triangles_cross,
density=self._density,
center_mass=self._center_mass,
skip_inertia=False)
return mass
def invert(self):
"""
Invert the mesh in-place by reversing the winding of every
face and negating normals without dumping the cache.
Alters `self.faces` by reversing columns, and negating
`self.face_normals` and `self.vertex_normals`.
"""
with self._cache:
if 'face_normals' in self._cache:
self.face_normals = self._cache['face_normals'] * -1.0
if 'vertex_normals' in self._cache:
self.vertex_normals = self._cache['vertex_normals'] * -1.0
# fliplr makes array non-contiguous so cache checks slow
self.faces = np.ascontiguousarray(
np.fliplr(self.faces))
# save our normals
self._cache.clear(exclude=['face_normals',
'vertex_normals'])
def scene(self, **kwargs):
"""
Returns a Scene object containing the current mesh.
Returns
---------
scene : trimesh.scene.scene.Scene
Contains just the current mesh
"""
return Scene(self, **kwargs)
def show(self, **kwargs):
"""
Render the mesh in an opengl window. Requires pyglet.
Parameters
------------
smooth : bool
Run smooth shading on mesh or not,
large meshes will be slow
Returns
-----------
scene : trimesh.scene.Scene
Scene with current mesh in it
"""
scene = self.scene()
return scene.show(**kwargs)
def submesh(self, faces_sequence, **kwargs):
"""
Return a subset of the mesh.
Parameters
------------
faces_sequence : sequence (m, ) int
Face indices of mesh
only_watertight : bool
Only return submeshes which are watertight
append : bool
Return a single mesh which has the faces appended.
if this flag is set, only_watertight is ignored
Returns
---------
submesh : Trimesh or (n,) Trimesh
Single mesh if `append` or list of submeshes
"""
return util.submesh(
mesh=self,
faces_sequence=faces_sequence,
**kwargs)
@caching.cache_decorator
def identifier(self):
"""
Return a float vector which is unique to the mesh
and is robust to rotation and translation.
Returns
-----------
identifier : (7,) float
Identifying properties of the current mesh
"""
return comparison.identifier_simple(self)
@caching.cache_decorator
def identifier_hash(self):
"""
A hash of the rotation invariant identifier vector.
Returns
---------
hashed : str
Hex string of the SHA256 hash from
the identifier vector at hand-tuned sigfigs.
"""
return comparison.identifier_hash(self.identifier)
@property
def identifier_md5(self):
warnings.warn(
'`geom.identifier_md5` is deprecated and will ' +
'be removed in October 2023: replace ' +
'with `geom.identifier_hash`',
DeprecationWarning)
return self.identifier_hash
def export(self, file_obj=None, file_type=None, **kwargs):
"""
Export the current mesh to a file object.
If file_obj is a filename, file will be written there.
Supported formats are stl, off, ply, collada, json,
dict, glb, dict64, msgpack.
Parameters
------------
file_obj : open writeable file object
str, file name where to save the mesh
None, return the export blob
file_type : str
Which file type to export as, if `file_name`
is passed this is not required.
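        Examples
        ----------
        A rough usage sketch; 'box.stl' below is a hypothetical path.
        >>> mesh = trimesh.creation.box()
        >>> blob = mesh.export(file_type='stl')
        >>> len(blob) > 0
        True
        >>> _ = mesh.export('box.stl')  # hypothetical path, writes to disk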
"""
return export_mesh(
mesh=self,
file_obj=file_obj,
file_type=file_type,
**kwargs)
def to_dict(self):
"""
Return a dictionary representation of the current mesh
with keys that can be used as the kwargs for the
Trimesh constructor and matches the schema in:
`trimesh/resources/schema/primitive/trimesh.schema.json`
Returns
----------
result : dict
Matches schema and Trimesh constructor.
"""
return {'vertices': self.vertices.tolist(),
'faces': self.faces.tolist()}
def convex_decomposition(self, maxhulls=20, **kwargs):
"""
Compute an approximate convex decomposition of a mesh.
testVHACD Parameters which can be passed as kwargs:
Name Default
-----------------------------------------------------
resolution 100000
max. concavity 0.001
plane down-sampling 4
convex-hull down-sampling 4
alpha 0.05
beta 0.05
maxhulls 10
pca 0
mode 0
max. vertices per convex-hull 64
min. volume to add vertices to convex-hulls 0.0001
convex-hull approximation 1
OpenCL acceleration 1
OpenCL platform ID 0
OpenCL device ID 0
output output.wrl
log log.txt
Parameters
------------
maxhulls : int
Maximum number of convex hulls to return
**kwargs : testVHACD keyword arguments
Returns
-------
meshes : list of trimesh.Trimesh
List of convex meshes that approximate the original
"""
result = decomposition.convex_decomposition(self,
maxhulls=maxhulls,
**kwargs)
return result
def union(self, other, engine=None, **kwargs):
"""
Boolean union between this mesh and n other meshes
Parameters
------------
other : Trimesh or (n, ) Trimesh
Other meshes to union
engine : None or str
Which backend to use
Returns
---------
union : trimesh.Trimesh
Union of self and other Trimesh objects
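        Examples
        ----------
        A rough usage sketch; requires a boolean backend such as
        Blender or OpenSCAD to be installed, and uses box
        primitives as stand-in geometry.
        >>> a = trimesh.creation.box()
        >>> b = trimesh.creation.box(
        ...     transform=trimesh.transformations.translation_matrix(
        ...         [0.5, 0, 0]))
        >>> blended = a.union(b)
        >>> blended.volume > a.volume
        True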
"""
result = boolean.union(
meshes=np.append(self, other),
engine=engine,
**kwargs)
return result
def difference(self, other, engine=None, **kwargs):
"""
Boolean difference between this mesh and n other meshes
Parameters
------------
other : trimesh.Trimesh, or list of trimesh.Trimesh objects
Meshes to difference
Returns
---------
difference : trimesh.Trimesh
Difference between self and other Trimesh objects
"""
result = boolean.difference(meshes=np.append(self, other),
engine=engine, **kwargs)
return result
def intersection(self, other, engine=None, **kwargs):
"""
Boolean intersection between this mesh and n other meshes
Parameters
------------
other : trimesh.Trimesh, or list of trimesh.Trimesh objects
Meshes to calculate intersections with
Returns
---------
intersection : trimesh.Trimesh
Mesh of the volume contained by all passed meshes
"""
result = boolean.intersection(meshes=np.append(self, other),
engine=engine, **kwargs)
return result
def contains(self, points):
"""
Given an array of points determine whether or not they
are inside the mesh. This raises an error if called on a
non-watertight mesh.
Parameters
------------
points : (n, 3) float
Points in cartesian space
Returns
---------
contains : (n, ) bool
Whether or not each point is inside the mesh
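        Examples
        ----------
        A rough usage sketch on a box primitive centered at the
        origin; the query points are arbitrary.
        >>> import numpy as np
        >>> mesh = trimesh.creation.box(extents=[2, 2, 2])
        >>> mesh.contains(np.array([[0, 0, 0], [5, 0, 0]])).tolist()
        [True, False]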
"""
return self.ray.contains_points(points)
@caching.cache_decorator
def face_angles(self):
"""
Returns the angle at each vertex of a face.
Returns
--------
angles : (len(self.faces), 3) float
Angle at each vertex of a face
"""
angles = triangles.angles(self.triangles)
return angles
@caching.cache_decorator
def face_angles_sparse(self):
"""
A sparse matrix representation of the face angles.
Returns
----------
sparse : scipy.sparse.coo_matrix
          Float sparse matrix with shape:
(len(self.vertices), len(self.faces))
"""
angles = curvature.face_angles_sparse(self)
return angles
@caching.cache_decorator
def vertex_defects(self):
"""
Return the vertex defects, or (2*pi) minus the sum of the angles
of every face that includes that vertex.
If a vertex is only included by coplanar triangles, this
will be zero. For convex regions this is positive, and
concave negative.
Returns
--------
vertex_defect : (len(self.vertices), ) float
          Vertex defect at every vertex
"""
defects = curvature.vertex_defects(self)
return defects
@caching.cache_decorator
def vertex_degree(self):
"""
Return the number of faces each vertex is included in.
Returns
----------
degree : (len(self.vertices), ) int
Number of faces each vertex is included in
"""
# get degree through sparse matrix
degree = np.array(self.faces_sparse.sum(axis=1)).flatten()
return degree
@caching.cache_decorator
def face_adjacency_tree(self):
"""
An R-tree of face adjacencies.
Returns
--------
tree: rtree.index
Where each edge in self.face_adjacency has a
rectangular cell
"""
# the (n,6) interleaved bounding box for every line segment
segment_bounds = np.column_stack((
self.vertices[self.face_adjacency_edges].min(axis=1),
self.vertices[self.face_adjacency_edges].max(axis=1)))
tree = util.bounds_tree(segment_bounds)
return tree
def copy(self, include_cache=False):
"""
Safely return a copy of the current mesh.
By default, copied meshes will have emptied cache
to avoid memory issues and so may be slow on initial
operations until caches are regenerated.
Current object will *never* have its cache cleared.
Parameters
------------
include_cache : bool
If True, will shallow copy cached data to new mesh
Returns
---------
copied : trimesh.Trimesh
Copy of current mesh
"""
# start with an empty mesh
copied = Trimesh()
# always deepcopy vertex and face data
copied._data.data = copy.deepcopy(self._data.data)
# copy visual information
copied.visual = self.visual.copy()
# get metadata
copied.metadata = copy.deepcopy(self.metadata)
# get center_mass and density
if self._center_mass is not None:
copied.center_mass = self.center_mass
copied._density = self._density
# make sure cache ID is set initially
copied._cache.verify()
if include_cache:
            # shallow copy cached items into the new cache:
            # the data didn't change here, so when the data
            # in the new mesh is changed later these items
            # will be dumped in the new mesh but preserved
            # in the original mesh
copied._cache.cache.update(self._cache.cache)
return copied
def __deepcopy__(self, *args):
# interpret deep copy as "get rid of cached data"
return self.copy(include_cache=False)
def __copy__(self, *args):
# interpret shallow copy as "keep cached data"
return self.copy(include_cache=True)
def eval_cached(self, statement, *args):
"""
Evaluate a statement and cache the result before returning.
        Statements are evaluated inside the Trimesh object's namespace,
        so `self` refers to the current mesh.
Parameters
------------
statement : str
Statement of valid python code
*args : list
Available inside statement as args[0], etc
Returns
-----------
result : result of running eval on statement with args
Examples
-----------
r = mesh.eval_cached('np.dot(self.vertices, args[0])', [0, 0, 1])
"""
statement = str(statement)
key = 'eval_cached_' + statement
key += '_'.join(str(i) for i in args)
if key in self._cache:
return self._cache[key]
result = eval(statement)
self._cache[key] = result
return result
def __add__(self, other):
"""
Concatenate the mesh with another mesh.
Parameters
------------
other : trimesh.Trimesh object
Mesh to be concatenated with self
Returns
----------
concat : trimesh.Trimesh
Mesh object of combined result
"""
concat = util.concatenate(self, other)
return concat
|
{
"content_hash": "2267fb2d63a2e7ef868985f06e3d32cf",
"timestamp": "",
"source": "github",
"line_count": 3040,
"max_line_length": 86,
"avg_line_length": 32.03980263157895,
"alnum_prop": 0.5591626369339124,
"repo_name": "mikedh/trimesh",
"id": "5e63e6bc0adeb86ae919c06d78e9a1644132ed88",
"size": "97401",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "trimesh/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "5887"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "2142314"
},
{
"name": "Shell",
"bytes": "5161"
}
],
"symlink_target": ""
}
|
"""
Interfaces with Egardia/Woonveilig alarm control panel.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.egardia/
"""
import logging
import requests
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY, STATE_ALARM_TRIGGERED,
STATE_ALARM_ARMED_NIGHT)
from homeassistant.components.egardia import (
EGARDIA_DEVICE, EGARDIA_SERVER,
REPORT_SERVER_CODES_IGNORE, CONF_REPORT_SERVER_CODES,
CONF_REPORT_SERVER_ENABLED, CONF_REPORT_SERVER_PORT
)
DEPENDENCIES = ['egardia']
_LOGGER = logging.getLogger(__name__)
STATES = {
'ARM': STATE_ALARM_ARMED_AWAY,
'DAY HOME': STATE_ALARM_ARMED_HOME,
'DISARM': STATE_ALARM_DISARMED,
'ARMHOME': STATE_ALARM_ARMED_HOME,
'HOME': STATE_ALARM_ARMED_HOME,
'NIGHT HOME': STATE_ALARM_ARMED_NIGHT,
'TRIGGERED': STATE_ALARM_TRIGGERED
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Egardia platform."""
if discovery_info is None:
return
device = EgardiaAlarm(
discovery_info['name'],
hass.data[EGARDIA_DEVICE],
discovery_info[CONF_REPORT_SERVER_ENABLED],
discovery_info.get(CONF_REPORT_SERVER_CODES),
discovery_info[CONF_REPORT_SERVER_PORT])
# add egardia alarm device
add_entities([device], True)
class EgardiaAlarm(alarm.AlarmControlPanel):
"""Representation of a Egardia alarm."""
def __init__(self, name, egardiasystem,
rs_enabled=False, rs_codes=None, rs_port=52010):
"""Initialize the Egardia alarm."""
self._name = name
self._egardiasystem = egardiasystem
self._status = None
self._rs_enabled = rs_enabled
self._rs_codes = rs_codes
self._rs_port = rs_port
async def async_added_to_hass(self):
"""Add Egardiaserver callback if enabled."""
if self._rs_enabled:
_LOGGER.debug("Registering callback to Egardiaserver")
self.hass.data[EGARDIA_SERVER].register_callback(
self.handle_status_event)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._status
@property
def should_poll(self):
"""Poll if no report server is enabled."""
if not self._rs_enabled:
return True
return False
def handle_status_event(self, event):
"""Handle the Egardia system status event."""
statuscode = event.get('status')
if statuscode is not None:
status = self.lookupstatusfromcode(statuscode)
self.parsestatus(status)
self.schedule_update_ha_state()
def lookupstatusfromcode(self, statuscode):
"""Look at the rs_codes and returns the status from the code."""
status = next((
status_group.upper() for status_group, codes
in self._rs_codes.items() for code in codes
if statuscode == code), 'UNKNOWN')
return status
def parsestatus(self, status):
"""Parse the status."""
_LOGGER.debug("Parsing status %s", status)
# Ignore the statuscode if it is IGNORE
if status.lower().strip() != REPORT_SERVER_CODES_IGNORE:
_LOGGER.debug("Not ignoring status %s", status)
newstatus = STATES.get(status.upper())
_LOGGER.debug("newstatus %s", newstatus)
self._status = newstatus
else:
_LOGGER.error("Ignoring status")
def update(self):
"""Update the alarm status."""
status = self._egardiasystem.getstate()
self.parsestatus(status)
def alarm_disarm(self, code=None):
"""Send disarm command."""
try:
self._egardiasystem.alarm_disarm()
except requests.exceptions.RequestException as err:
_LOGGER.error("Egardia device exception occurred when "
"sending disarm command: %s", err)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
try:
self._egardiasystem.alarm_arm_home()
except requests.exceptions.RequestException as err:
_LOGGER.error("Egardia device exception occurred when "
"sending arm home command: %s", err)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
try:
self._egardiasystem.alarm_arm_away()
except requests.exceptions.RequestException as err:
_LOGGER.error("Egardia device exception occurred when "
"sending arm away command: %s", err)
|
{
"content_hash": "e3312eb668a06d3b0fc82f6462200a28",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 74,
"avg_line_length": 34.359154929577464,
"alnum_prop": 0.6247181799549087,
"repo_name": "PetePriority/home-assistant",
"id": "dfd60c4abde14df03ee52648e646066338c55628",
"size": "4879",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/egardia/alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from subprocess import call
import sys
from os import access, listdir, makedirs, X_OK
from os.path import dirname, isdir, isfile, join, normcase, normpath
from shutil import rmtree, copy
# A hackish way to import the configuration
sys.path.append(dirname(__file__))
from configuration import ETALON_DIR, TOCOMP_DIR, EXTENSION, TESTSET, IGNORE
from csv_test import main as csvtest_main
#===============================================================================
WIN = (sys.platform == "win32")
# Assumes the default directory layout of Eclipse and project name SG2PS
# SG2PS_HOME is defined in configuration
#SG2PS_EXE = join(SG2PS_HOME, 'Debug', 'SG2PS.exe' if WIN else 'SG2PS')
SG2PS_EXE = '/home/ali/ws-pydev/CSV_Test/sg2ps'
FLAG = '--debug'
INPUT_EXT = '.rgf'
RGF_FOLDER = '/home/ali/sg2ps_tests/rgf_folder'
#RGF_FOLDER = '/home/ali/sg2ps_tests/rgf_passing'
#RGF_FOLDER = '/home/ali/sg2ps_tests/empty'
# Check configuration.py too!
# Save the console output of sg2ps into a <project name>.log file
LOG_EXT = '.log'
RUN_IN_DEBUGGER = False
#===============================================================================
def main():
# Check whether we need to invoke CSV test after generating the input
NO_TEST = '--notest'
argc = len(sys.argv)
if argc > 2 or (argc==2 and sys.argv[1] != NO_TEST):
print('Only the optional', NO_TEST, 'argument is supported')
return
RUN_CSVTEST = argc < 2
if is_there_path_error():
print('Exiting...')
return
print('All paths seem sane')
# Delete the TOCOMP_DIR as it may contain files from a previous run
if isdir(TOCOMP_DIR):
print('Deleting "{}"'.format(TOCOMP_DIR))
rmtree(TOCOMP_DIR)
print('Creating the test folder "{}"'.format(TOCOMP_DIR))
makedirs(TOCOMP_DIR)
# Copy the input files from the RGF folder to the test directory TOCOMP_DIR
to_cp = sorted(f for f in listdir(RGF_FOLDER) if isfile(join(RGF_FOLDER,f)))
for f in to_cp:
copy(join(RGF_FOLDER, f), TOCOMP_DIR)
print('Copied', len(to_cp), 'files ', end='')
print('from "{}" to "{}"'.format(RGF_FOLDER, TOCOMP_DIR))
projects = collect_project_names(to_cp)
if projects is None:
return # the error message has been logged already
if not projects:
print('Something is wrong, no projects found...')
return
# Run the sg2ps executable on the projects in TOCOMP_DIR
# and check if each project generates at least one CSV file
previous_csv_files = set()
for project_name in projects:
cmd = build_command(project_name)
with open(join(TOCOMP_DIR, project_name+LOG_EXT), 'w') as logfile:
ret = call(cmd, cwd=TOCOMP_DIR, stdout=logfile)
        # FIXME Should we simply log and ignore all errors? Test failures
        # return non-zero return codes, so otherwise they cannot be handled.
if ret:
print('Fatal error when calling {}, exiting'.format(SG2PS_EXE))
return
new_csv_files = get_new_csv_files(TOCOMP_DIR, previous_csv_files)
if not new_csv_files:
print('Error: no new CSV file generated, exiting...')
return
print('New CSV files:', new_csv_files)
previous_csv_files.update(new_csv_files)
print('Test file generation finished')
if RUN_CSVTEST:
print('Invoking CSV test now\n')
extra_msg_in_header = 'RGF files are in: "{}"'.format(RGF_FOLDER)
csvtest_main(extra_msg_in_header)
else:
print('Not running tests as requested; we are done!')
def collect_project_names(to_cp):
# TODO Keep in sync with csv_test which also does something similar
projects = { f[:-len(INPUT_EXT)] for f in to_cp if f.endswith(INPUT_EXT) }
testset = set(TESTSET)
if testset:
missing = sorted(testset - projects)
if missing:
print('The following files in the test set are missing:')
print(missing)
return None
return sorted(projects & testset)
#
projects.difference_update(IGNORE)
return sorted(projects)
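# Illustrative call with hypothetical file names: if TESTSET is empty and
# IGNORE contains 'skipme', then
#   collect_project_names(['a.rgf', 'skipme.rgf', 'notes.txt'])
# returns ['a'].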
def build_command(project_name):
cmd = [SG2PS_EXE, FLAG, project_name]
if RUN_IN_DEBUGGER:
cmd = ['gdb', '--batch', '--command=stacktrace.gdb', '--args'] + cmd
#
print('Command:', end=' ')
for elem in cmd:
print(elem, end=' ')
print()
#
return cmd
def get_new_csv_files(directory, previous_files):
return sorted( f for f in listdir(directory) if f not in previous_files
and isfile(join(directory, f)) and f.endswith(EXTENSION) )
def is_there_path_error():
# Consider replacing this long if - elif with a loop
if not isfile(SG2PS_EXE) or not access(SG2PS_EXE, X_OK):
print('SG2PS is not executable, check: "{}"'.format(SG2PS_EXE))
elif not isdir(ETALON_DIR):
print('ETALON_DIR: not a valid directory path "{}"'.format(ETALON_DIR))
elif not isdir(RGF_FOLDER):
print('RGF_FOLDER: not a valid directory path "{}"'.format(RGF_FOLDER))
elif not isdir(TOCOMP_DIR):
print('TOCOMP_DIR "{}" will be created'.format(TOCOMP_DIR))
return False
# TOCOMP_DIR exists and will be deleted: Check if that can cause data loss
elif samefile_or_dir(TOCOMP_DIR, ETALON_DIR):
print('Etalon and test directory are the same "{}"'.format(ETALON_DIR))
elif samefile_or_dir(TOCOMP_DIR, RGF_FOLDER):
print('RGF and test directory are the same: "{}"'.format(RGF_FOLDER))
elif samefile_or_dir(TOCOMP_DIR, dirname(TOCOMP_DIR)):
print('Give a non-root TOCOMP_DIR directory: "{}"'.format(TOCOMP_DIR))
else:
return False
return True
def samefile_or_dir(f1, f2):
try:
from os.path import samefile
except ImportError:
return normcase(normpath(f1)) == normcase(normpath(f2))
return samefile(f1, f2)
if __name__=='__main__':
main()
|
{
"content_hash": "e0e46cf9c10bc544370d67f529fecf4e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 86,
"avg_line_length": 36.31325301204819,
"alnum_prop": 0.621765096217651,
"repo_name": "baharev/CSV_Test",
"id": "59a82f97d9dc387a7501a29a8c3d0e3927018eda",
"size": "6192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sg2ps_runner.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "615"
},
{
"name": "Python",
"bytes": "36152"
}
],
"symlink_target": ""
}
|
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
# constr
def __init__(self, maxLen):
self.v1 = deque([0.0]*maxLen)
self.v2 = deque([0.0]*maxLen)
self.v3 = deque([0.0]*maxLen)
self.maxLen = maxLen
# ring buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
#Add new data
def add(self, data):
assert(len(data) == 3)
self.addToBuf(self.v1, data[0])
self.addToBuf(self.v2, data[1])
self.addToBuf(self.v3, data[2])
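# Rough sketch of how the ring buffer fills (the values are made up):
#   data = AnalogData(3)        # deques start as [0.0, 0.0, 0.0]
#   data.add([1.0, 2.0, 3.0])   # oldest value dropped from the right,
#   data.add([4.0, 5.0, 6.0])   # newest pushed onto the left
#   list(data.v1)               # -> [4.0, 1.0, 0.0]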
# plot class
class AnalogPlot:
# constr
def __init__(self, analogData):
# set plot to animated
plt.ion()
plt.figure(figsize=(9,8))
self.v1line, = plt.plot(analogData.v1,label="Gyroscope_X",color="red")
self.v2line, = plt.plot(analogData.v2,label="Gyroscope_Y",color="orange")
self.v3line, = plt.plot(analogData.v3,label="Gyroscope_Z",color="green")
plt.xlabel("Time")
plt.ylabel("PWM range")
plt.title("Measure Gyroscope values")
plt.legend() #Show label figure.
plt.ylim([-600, 600]) # Vertical axis scale.
#TEST plt.ylim([-90, 90]) # Vertical axis scale.
plt.grid()
# update plot
def update(self, analogData):
self.v1line.set_ydata(analogData.v1)
self.v2line.set_ydata(analogData.v2)
self.v3line.set_ydata(analogData.v3)
plt.draw()
def main():
# expects 1 arg - serial port string
if(len(sys.argv) != 2):
print "Type:"
print "sudo chmod 777 /dev/ttyUSB0"
print "python gui3_gyroscope.py '/dev/ttyUSB0'"
exit(1)
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = sys.argv[1];
# plot parameters
analogData = AnalogData(200) # Horizontal axis scale.
analogPlot = AnalogPlot(analogData)
print "plotting data..."
a = 1
# open serial port
ser = serial.Serial(strPort, 9600)
while True:
try:
line = ser.readline()
data = [float(val) for val in line.split()]
if (a < 10):
a = a + 1
else:
print data[0] , data[1] ,data[2]
if(len(data) == 3):
analogData.add(data)
analogPlot.update(analogData)
except KeyboardInterrupt:
print "exiting"
break
# close serial
ser.flush()
ser.close()
# call main
if __name__ == '__main__':
main()
|
{
"content_hash": "b7a839fd3e2b8e6a01c93e5564b0b941",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 75,
"avg_line_length": 23.181818181818183,
"alnum_prop": 0.6649237472766885,
"repo_name": "zxc2694/STM32F429_Quadrotor",
"id": "2bb92b8bfed18d537af004190d3f4a38527f4343",
"size": "2710",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "program/pythonGUI/gui3_gyroscope.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "221900"
},
{
"name": "Batchfile",
"bytes": "9891"
},
{
"name": "C",
"bytes": "11651561"
},
{
"name": "C++",
"bytes": "1896418"
},
{
"name": "GDB",
"bytes": "73"
},
{
"name": "Makefile",
"bytes": "4310"
},
{
"name": "Objective-C",
"bytes": "42861"
},
{
"name": "Python",
"bytes": "21872"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import zipfile
from azure.common import AzureMissingResourceHttpError
try:
from azure.storage.blob import BlobService
except ImportError:
from azure.storage.blob import BlockBlobService as BlobService
from shutil import unpack_archive
from threading import Event
# TODOS: use Azure snapshots instead of hacky backups
def fixed_list_blobs(service, *args, **kwargs):
"""By defualt list_containers only returns a subset of results.
This function attempts to fix this.
"""
res = []
next_marker = None
while next_marker is None or len(next_marker) > 0:
kwargs['marker'] = next_marker
gen = service.list_blobs(*args, **kwargs)
for b in gen:
res.append(b.name)
next_marker = gen.next_marker
return res
def make_archive(source_path, dest_path):
if source_path.endswith(os.path.sep):
source_path = source_path.rstrip(os.path.sep)
prefix_path = os.path.dirname(source_path)
with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_STORED) as zf:
if os.path.isdir(source_path):
for dirname, _subdirs, files in os.walk(source_path):
zf.write(dirname, os.path.relpath(dirname, prefix_path))
for filename in files:
filepath = os.path.join(dirname, filename)
zf.write(filepath, os.path.relpath(filepath, prefix_path))
else:
zf.write(source_path, os.path.relpath(source_path, prefix_path))
class Container(object):
services = {}
def __init__(self, account_name, account_key, container_name, maybe_create=False):
self._account_name = account_name
self._container_name = container_name
if account_name not in Container.services:
Container.services[account_name] = BlobService(account_name, account_key)
self._service = Container.services[account_name]
if maybe_create:
self._service.create_container(self._container_name, fail_on_exist=False)
def put(self, source_path, blob_name, callback=None):
"""Upload a file or directory from `source_path` to azure blob `blob_name`.
Upload progress can be traced by an optional callback.
"""
upload_done = Event()
def progress_callback(current, total):
if callback:
callback(current, total)
if current >= total:
upload_done.set()
# Attempt to make backup if an existing version is already available
try:
x_ms_copy_source = "https://{}.blob.core.windows.net/{}/{}".format(
self._account_name,
self._container_name,
blob_name
)
self._service.copy_blob(
container_name=self._container_name,
blob_name=blob_name + ".backup",
x_ms_copy_source=x_ms_copy_source
)
except AzureMissingResourceHttpError:
pass
with tempfile.TemporaryDirectory() as td:
arcpath = os.path.join(td, "archive.zip")
make_archive(source_path, arcpath)
self._service.put_block_blob_from_path(
container_name=self._container_name,
blob_name=blob_name,
file_path=arcpath,
max_connections=4,
progress_callback=progress_callback,
max_retries=10)
upload_done.wait()
def get(self, dest_path, blob_name, callback=None):
"""Download a file or directory to `dest_path` to azure blob `blob_name`.
Warning! If directory is downloaded the `dest_path` is the parent directory.
Upload progress can be traced by an optional callback.
"""
download_done = Event()
def progress_callback(current, total):
if callback:
callback(current, total)
if current >= total:
download_done.set()
with tempfile.TemporaryDirectory() as td:
arcpath = os.path.join(td, "archive.zip")
for backup_blob_name in [blob_name, blob_name + '.backup']:
try:
properties = self._service.get_blob_properties(
blob_name=backup_blob_name,
container_name=self._container_name
)
if hasattr(properties, 'properties'):
# Annoyingly, Azure has changed the API: with an up-to-date azure
# package this now returns a blob instead of its properties.
blob_size = properties.properties.content_length
else:
blob_size = properties['content-length']
if int(blob_size) > 0:
self._service.get_blob_to_path(
container_name=self._container_name,
blob_name=backup_blob_name,
file_path=arcpath,
max_connections=4,
progress_callback=progress_callback)
unpack_archive(arcpath, dest_path)
download_done.wait()
return True
except AzureMissingResourceHttpError:
pass
return False
def list(self, prefix=None):
"""List all blobs in the container."""
return fixed_list_blobs(self._service, self._container_name, prefix=prefix)
def exists(self, blob_name):
"""Returns true if `blob_name` exists in container."""
try:
self._service.get_blob_properties(
blob_name=blob_name,
container_name=self._container_name
)
return True
except AzureMissingResourceHttpError:
return False
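# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The account name,
# key and container name below are placeholders; substitute real credentials
# before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    container = Container(
        account_name="myaccount",        # placeholder
        account_key="base64-key==",      # placeholder
        container_name="experiments",    # placeholder
        maybe_create=True,
    )
    # Upload a local checkpoint directory as a single zipped blob.
    container.put("./checkpoints", "run-0/checkpoints.zip")
    # Download it back; for directories, dest_path is the parent directory.
    container.get("./restored", "run-0/checkpoints.zip")
    print(container.list(prefix="run-0/"))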
|
{
"content_hash": "611749edadb183f7beaf70f7631c4f5b",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 91,
"avg_line_length": 38.714285714285715,
"alnum_prop": 0.5699429721569943,
"repo_name": "brain-research/mirage-rl-bpttv",
"id": "76380f4f66e5d7b53a0db82251ffc93407056a0c",
"size": "5962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baselines/common/azure_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "415760"
},
{
"name": "Shell",
"bytes": "518"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_kaadu_hue.iff"
result.attribute_template_id = 9
result.stfName("monster_name","kaadu")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/player_pet.py")
result.options_mask = 0x100
result.pvp_status = PVPSTATUS.PvPStatus_None
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "ac1bf8e016d55d2e83234ea287d9b61e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 26.8125,
"alnum_prop": 0.7202797202797203,
"repo_name": "anhstudios/swganh",
"id": "a5735756583f887da380e89bbe90e591b133250b",
"size": "574",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_kaadu_hue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import unittest2
from exam.decorators import fixture
from mock import Mock
from expect.core.args import AnyArgs
from expect.core.args import Args
from expect.core.expectation import ShouldReceiveExpectation
from expect.core.stub import Stub
from expect.core.test_environment import TestEnvironment
class TestEnvironmentTestCase(unittest2.TestCase):
obj = fixture(Mock, name='obj')
test_environment = fixture(TestEnvironment)
def test_sets_up_and_resets_stubs(self):
stub = Stub('stub')
stub.set_default_response('response')
original_methods = [self.obj.method1, self.obj.method2]
self.test_environment.add_stub(self.obj, 'method1', stub)
self.test_environment.add_stub(self.obj, 'method2', stub)
self.assertEqual('response', self.obj.method1('any', 'args'))
self.assertEqual('response', self.obj.method2('any', 'args'))
self.test_environment.reset()
self.assertEqual(original_methods, [self.obj.method1, self.obj.method2])
def test_verifies_mock_expectations(self):
stub = Stub('stub')
stub.set_default_response('response')
passing_expectation = ShouldReceiveExpectation(stub, AnyArgs)
failing_expectation = ShouldReceiveExpectation(stub,
Args.make('some args'))
self.test_environment.add_mock_expectation(passing_expectation)
self.test_environment.add_mock_expectation(failing_expectation)
stub('random args')
try:
self.test_environment.verify_expectations()
except AssertionError, e:
self.assertEqual("Expected stub('some args') to be called but it "
"wasn't.", str(e))
else:
raise AssertionError('expected AssertionError')
def test_resets_mock_expectations(self):
stub = Stub('stub')
stub.set_default_response('response')
expectation = ShouldReceiveExpectation(stub, AnyArgs)
self.test_environment.add_mock_expectation(expectation)
self.test_environment.reset()
self.test_environment.verify_expectations()
def test_can_reset_multiple_times(self):
self.test_environment.add_stub(self.obj, 'method', Stub('stub'))
self.test_environment.reset()
self.test_environment.reset()
def test_resets_args_called_on_stubs(self):
stub = Stub('stub')
stub.set_default_response('response')
self.test_environment.add_stub(self.obj, 'method', stub)
stub(123)
self.test_environment.reset()
self.assertEqual([], stub.was_called_with)
|
{
"content_hash": "b6a3344339b4c5b6ffece25d634b974c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 37.36619718309859,
"alnum_prop": 0.6607614021862043,
"repo_name": "sumeet/expect",
"id": "bb6258d237599483b1e230c8b5eadf43a85695de",
"size": "2653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_environment_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25623"
}
],
"symlink_target": ""
}
|
import re
from .output_formatter_style import OutputFormatterStyle
from .output_formatter_style_stack import OutputFormatterStyleStack
class OutputFormatter(object):
FORMAT_PATTERN = '(?isx)(\\\\?)<(/?)([a-z][a-z0-9_=;-]*)?>((?: [^<\\\\]+ | (?!<(?:/?[a-z]|/>)). | .(?<=\\\\<) )*)'
def __init__(self, decorated=False, styles=None):
self.__decorated = bool(decorated)
styles = styles or {}
self.__styles = {}
self.set_style('error', OutputFormatterStyle('white', 'red'))
self.set_style('info', OutputFormatterStyle('green'))
self.set_style('comment', OutputFormatterStyle('yellow'))
self.set_style('question', OutputFormatterStyle('black', 'cyan'))
for name, style in styles.items():
self.set_style(name, style)
self.__style_stack = OutputFormatterStyleStack()
@classmethod
def escape(cls, text):
return re.sub('(?is)([^\\\\]?)<', '\\1\\<', text)
def set_decorated(self, decorated):
self.__decorated = bool(decorated)
def is_decorated(self):
return self.__decorated
def set_style(self, name, style):
self.__styles[name] = style
def has_style(self, name):
return name in self.__styles
def get_style(self, name):
if self.has_style(name):
return self.__styles[name]
def format(self, message):
message = re.sub(self.FORMAT_PATTERN, self.replace_style, message)
return message.replace('\\<', '<')
def replace_style(self, match):
# we got "\<" escaped char
if match.group(1) == '\\':
return self.apply_current_style(match.group(0))
if not match.group(3):
if match.group(2) == '/':
# we got "</>" tag
self.__style_stack.pop()
return self.apply_current_style(match.group(4))
# we got "<>" tag
return '<>' + self.apply_current_style(match.group(4))
if match.group(3).lower() in self.__styles:
style = self.__styles[match.group(3).lower()]
else:
style = self.create_style_from_string(match.group(3))
if style is False:
return self.apply_current_style(match.group(0))
if match.group(2) == '/':
self.__style_stack.pop(style)
else:
self.__style_stack.push(style)
return self.apply_current_style(match.group(4))
def create_style_from_string(self, string):
matches = re.findall('([^=]+)=([^;]+)(;|$)', string.lower())
if not len(matches):
return False
style = OutputFormatterStyle()
for match in matches:
if match[0] == 'fg':
style.set_foreground(match[1])
elif match[0] == 'bg':
style.set_background(match[1])
else:
style.set_option(match[1])
return style
def apply_current_style(self, text):
if self.is_decorated() and len(text):
return self.__style_stack.get_current().apply(text)
else:
return text
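# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), relying only on the
# built-in styles registered in __init__ ("error", "info", "comment",
# "question") and the inline "fg=...;bg=..." tag syntax handled by
# create_style_from_string. Colours are only emitted when decorated is True.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    formatter = OutputFormatter(decorated=True)
    print(formatter.format('<info>All tests passed</info>'))
    print(formatter.format('<fg=white;bg=red>Something went wrong</>'))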
|
{
"content_hash": "93728a2fa53795bb2047560e811331bc",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 118,
"avg_line_length": 30.475728155339805,
"alnum_prop": 0.5466709143039185,
"repo_name": "Romibuzi/cleo",
"id": "abeaf9ba364c33fb830f85ef26b44c7380a0b6b5",
"size": "3164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleo/formatters/output_formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284394"
},
{
"name": "Shell",
"bytes": "806"
}
],
"symlink_target": ""
}
|
import os
import logging
import logging.handlers
log = logging.getLogger('imc')
console = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
file_handler = logging.handlers.RotatingFileHandler(
filename, maxBytes=10*1024*1024, backupCount=5)
log.addHandler(file_handler)
def set_log_level(level=logging.DEBUG):
"""
Allows setting log level
Args:
level: logging level - import logging and pass enums from it (INFO/DEBUG/ERROR/etc.)
Returns:
None
Example:
from imcsdk import set_log_level
import logging
set_log_level(logging.INFO)
"""
log.setLevel(level)
console.setLevel(level)
set_log_level(logging.DEBUG)
log.addHandler(console)
if os.path.exists('/tmp/imcsdk_debug'):
enable_file_logging()
__author__ = 'Cisco Systems'
__email__ = 'ucs-python@cisco.com'
__version__ = '0.9.3.1'
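# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the log file path
# is a placeholder.
#
#     import logging
#     from imcsdk import enable_file_logging, set_log_level
#     enable_file_logging("/var/log/my_imcsdk.log")
#     set_log_level(logging.INFO)
# ---------------------------------------------------------------------------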
|
{
"content_hash": "bfce351d046f838ffeb5171e986dd655",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 92,
"avg_line_length": 22.52173913043478,
"alnum_prop": 0.6708494208494209,
"repo_name": "ragupta-git/ImcSdk",
"id": "f6cfa18418a4236f467e39d427e1314034bf62fe",
"size": "1616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imcsdk/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042023"
}
],
"symlink_target": ""
}
|
""" Module that holds a general kervi application module threading class"""
from kervi.utility.thread import KerviThread
from kervi.spine import Spine
class ModuleThread(KerviThread):
def __init__(self):
KerviThread.__init__(self)
self.spine = Spine()
self.spine.register_command_handler("startThreads", self._startCommand)
def _step(self):
self.moduleStep()
def _startCommand(self):
if not self.isAlive():
super(KerviThread, self).start()
def _stopCommand(self):
self.stop()
|
{
"content_hash": "34cd7bc1f6a9cbf7fb1186f43ec320ef",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 27.85,
"alnum_prop": 0.6552962298025135,
"repo_name": "kervi/kervi",
"id": "6dbe9e54dc6becda375a76ee24876a575bafc562",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kervi-core/kervi/core/utility/module_thread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "389"
},
{
"name": "CSS",
"bytes": "610125"
},
{
"name": "HTML",
"bytes": "2998420"
},
{
"name": "JavaScript",
"bytes": "16183042"
},
{
"name": "Python",
"bytes": "954284"
},
{
"name": "Shell",
"bytes": "557"
},
{
"name": "TypeScript",
"bytes": "286601"
}
],
"symlink_target": ""
}
|
"""
Some models for pulling data from Trac.
Initially generated by inspectdb then modified heavily by hand, often by
consulting http://trac.edgewall.org/wiki/TracDev/DatabaseSchema.
These are far from perfect: many (most?) Trac tables have composite primary
keys, which Django can't represent. This means a lot of built-in Django stuff
(the admin, for example) won't work at all with these models. I haven't
investigated just how deep these failures go, but I suspect all sorts
of things just won't work.
However, they're Good Enough(tm) to let me pull some basic (read-only) data out,
and that's all I really need.
Some potential TODOs:
* Add some convenience manager functions to deal with ticket_custom (a rough sketch of one such manager is appended at the end of this module). Right
now you can query with a join::
Ticket.objects.filter(custom_fields__name='ui_ux',
custom_fields__value='1')
Perhaps we might be able to get something like::
Ticket.objects.with_custom(ui_ux=True)
Or even a custom .filter() that intercepts and figures it out?
* Trac stores SVN repository revisions as '0000003744', grar. This
makes querying awkward. There's probably some tricky manager magic
we could do here.
* The whole Revision model will fall apart if we ever add a second
repository to Trac.
And a few notes on tables that're left out and why:
* All the session and permission tables: they're just not needed.
* Enum: I don't know what this is or what it's for.
* NodeChange: Ditto.
"""
import datetime
from django.db import models
from django.utils.tzinfo import FixedOffset
_epoc = datetime.datetime(1970, 1, 1, tzinfo=FixedOffset(0))
class time_property(object):
"""
Convert Trac timestamps into UTC datetimes.
See http://trac.edgewall.org/browser//branches/0.12-stable/trac/util/datefmt.py
for Trac's version of all this. Mine's something of a simplification.
Like the rest of this module this is far from perfect -- no setters, for
example! That's good enough for now.
"""
def __init__(self, fieldname):
self.fieldname = fieldname
def __get__(self, instance, owner):
if instance is None:
return self
timestamp = getattr(instance, self.fieldname)
return _epoc + datetime.timedelta(microseconds=timestamp)
class Ticket(models.Model):
id = models.IntegerField(primary_key=True)
type = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
_changetime = models.BigIntegerField(db_column='changetime')
changetime = time_property('_changetime')
component = models.ForeignKey('Component', related_name='tickets', db_column='component')
severity = models.TextField()
owner = models.TextField()
reporter = models.TextField()
cc = models.TextField()
version = models.ForeignKey('Version', related_name='tickets', db_column='version')
milestone = models.ForeignKey('Milestone', related_name='tickets', db_column='milestone')
priority = models.TextField()
status = models.TextField()
resolution = models.TextField()
summary = models.TextField()
description = models.TextField()
keywords = models.TextField()
class Meta(object):
db_table = 'ticket'
managed = False
def __unicode__(self):
return "#%s: %s" % (self.id, self.summary)
def __init__(self, *args, **kwargs):
super(Ticket, self).__init__(*args, **kwargs)
# Munge custom fields onto this object. This sucks since it implies
# querying will work (it won't!) and that writing will work (ditto).
# Also notice that *nasty* mapping of Trac's "booleanish" things to
# real booleans. This can fail in a bunch of ways, but not in our
# particular install.
for name, value in self.custom_fields.values_list('name', 'value'):
if value in ('0', '1'):
value = bool(int(value))
setattr(self, name, value)
class TicketCustom(models.Model):
ticket = models.ForeignKey(Ticket, related_name='custom_fields', db_column='ticket', primary_key=True)
name = models.TextField()
value = models.TextField()
class Meta(object):
db_table = 'ticket_custom'
managed = False
def __unicode__(self):
return "%s: %s" % (self.name, self.value)
class TicketChange(models.Model):
ticket = models.ForeignKey(Ticket, related_name='changes', db_column='ticket', primary_key=True)
author = models.TextField()
field = models.TextField()
oldvalue = models.TextField()
newvalue = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
class Meta(object):
db_table = 'ticket_change'
managed = False
ordering = ['_time']
def __unicode__(self):
return "#%s: changed %s" % (self.ticket.id, self.field)
class Component(models.Model):
name = models.TextField(primary_key=True)
owner = models.TextField()
description = models.TextField()
class Meta(object):
db_table = 'component'
managed = False
def __unicode__(self):
return self.name
class Version(models.Model):
name = models.TextField(primary_key=True)
description = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
class Meta(object):
db_table = 'version'
managed = False
def __unicode__(self):
return self.name
class Milestone(models.Model):
name = models.TextField(primary_key=True)
description = models.TextField()
_due = models.BigIntegerField(db_column='due')
due = time_property('_due')
_completed = models.BigIntegerField(db_column='completed')
completed = time_property('_completed')
class Meta(object):
db_table = 'milestone'
managed = False
def __unicode__(self):
return self.name
class SingleRepoRevisionManager(models.Manager):
"""
Forces Revision to only query against a single repo, thus making
Revision.rev behave something like a primary key.
"""
def __init__(self, repo_id):
self.repo_id = repo_id
super(SingleRepoRevisionManager, self).__init__()
def get_queryset(self):
qs = super(SingleRepoRevisionManager, self).get_queryset()
return qs.filter(repos=self.repo_id)
SINGLE_REPO_ID = 1
class Revision(models.Model):
repos = models.IntegerField()
rev = models.TextField(primary_key=True)
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
author = models.TextField()
message = models.TextField()
objects = SingleRepoRevisionManager(repo_id=SINGLE_REPO_ID)
class Meta(object):
db_table = 'revision'
managed = False
def __unicode__(self):
return '[%s] %s' % (self.rev, self.message.split('\n', 1)[0])
# The Wiki table uses a composite primary key (name, version). Since
# Django doesn't support this, this model sits on top of a simple view.
# CREATE VIEW "wiki_django_view" AS
# SELECT "name" || '.' || "version" AS "django_id", *
# FROM wiki;
class Wiki(models.Model):
django_id = models.TextField(primary_key=True)
name = models.TextField()
version = models.IntegerField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
author = models.TextField()
ipnr = models.TextField()
text = models.TextField()
comment = models.TextField()
readonly = models.IntegerField()
class Meta:
db_table = 'wiki_django_view'
managed = False
def __unicode__(self):
return '%s (v%s)' % (self.name, self.version)
# Same story as for Wiki: attachment's PK is (type, id, filename), so again
# there's a simple view this is on top of.
# CREATE VIEW "attachment_django_view" AS
# SELECT "type" || '.' || "id" || '.' || "filename" AS "django_id", *
# FROM attachment;
class Attachment(models.Model):
django_id = models.TextField(primary_key=True)
type = models.TextField()
id = models.TextField()
filename = models.TextField()
size = models.IntegerField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
description = models.TextField()
author = models.TextField()
ipnr = models.TextField()
class Meta:
db_table = 'attachment_django_view'
managed = False
def __unicode__(self):
attached_to = ('#%s' % self.id) if self.type == 'ticket' else self.id
return '%s (on %s)' % (self.filename, attached_to)
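# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original models) of the convenience manager
# suggested in the module docstring's TODO list. `TicketManager` and
# `with_custom` are hypothetical names; to use it, Ticket would need an
# `objects = TicketManager()` attribute.
# ---------------------------------------------------------------------------
class TicketManager(models.Manager):
    def with_custom(self, **fields):
        """Filter on custom fields, e.g. .with_custom(ui_ux=True)."""
        qs = self.get_queryset()
        for name, value in fields.items():
            if isinstance(value, bool):
                # Trac stores "booleanish" custom fields as '0'/'1' strings.
                value = '1' if value else '0'
            qs = qs.filter(custom_fields__name=name,
                           custom_fields__value=value)
        return qs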
|
{
"content_hash": "a2a1345575cee4ae8de1ba7da94d8765",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 106,
"avg_line_length": 30.929078014184398,
"alnum_prop": 0.6536344875028663,
"repo_name": "alawnchen/djangoproject.com",
"id": "ed6c4e817eebe30dcbc5a363e46c00d4a7ada49e",
"size": "8722",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tracdb/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133070"
},
{
"name": "CoffeeScript",
"bytes": "24188"
},
{
"name": "HTML",
"bytes": "216657"
},
{
"name": "JavaScript",
"bytes": "802988"
},
{
"name": "Makefile",
"bytes": "1628"
},
{
"name": "Python",
"bytes": "499809"
},
{
"name": "Ruby",
"bytes": "19821"
},
{
"name": "Smalltalk",
"bytes": "1917"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codery.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "fd4ea7a936cba77cc10ed47a5beb8790",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "inducer/codery",
"id": "ec4b46af971c884651339564e3a07b7877ac962d",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "12477"
},
{
"name": "Python",
"bytes": "101426"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from zipfile import ZipFile
from django.core.files.uploadedfile import SimpleUploadedFile
def unzip(file_obj):
"""
Take a file object pointing to a zip archive, check that it is a valid
zip file, and return a list of (SimpleUploadedFile, filename) tuples for its members.
"""
files = []
# TODO: implement try-except here
zip = ZipFile(file_obj)
bad_file = zip.testzip()
if bad_file:
raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
infolist = zip.infolist()
for zipinfo in infolist:
if zipinfo.filename.startswith('__'): # do not process meta files
continue
file_obj = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo))
files.append((file_obj, zipinfo.filename))
zip.close()
return files
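# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the archive path is
# taken from the command line.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    import sys
    with open(sys.argv[1], "rb") as f:
        for uploaded_file, filename in unzip(f):
            print("%s (%d bytes)" % (filename, uploaded_file.size))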
|
{
"content_hash": "0ea7a00b228c1a2ad5cf4dc1ef06104f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 30.153846153846153,
"alnum_prop": 0.6543367346938775,
"repo_name": "skirsdeda/django-filer",
"id": "5cc90836ae649408699af4335115a5b65b2b4e86",
"size": "809",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "filer/utils/zip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91544"
},
{
"name": "HTML",
"bytes": "80942"
},
{
"name": "JavaScript",
"bytes": "58948"
},
{
"name": "Python",
"bytes": "346663"
},
{
"name": "Ruby",
"bytes": "1119"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s11_gen3.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_2h_type11")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b4e28f385bd1ce5edbb70af2f6b39cc2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 111,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.7178770949720671,
"repo_name": "anhstudios/swganh",
"id": "c2bad136c36ae6c878bf467a4828b1f38d3db906",
"size": "503",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s11_gen3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from functools import partial
from nose.tools import eq_
from bleach import clean
clean = partial(clean, tags=['p'], attributes=['style'])
def test_allowed_css():
tests = (
('font-family: Arial; color: red; float: left; '
'background-color: red;', 'color: red;', ['color']),
('border: 1px solid blue; color: red; float: left;', 'color: red;',
['color']),
('border: 1px solid blue; color: red; float: left;',
'color: red; float: left;', ['color', 'float']),
('color: red; float: left; padding: 1em;', 'color: red; float: left;',
['color', 'float']),
('color: red; float: left; padding: 1em;', 'color: red;', ['color']),
('cursor: -moz-grab;', 'cursor: -moz-grab;', ['cursor']),
('color: hsl(30,100%,50%);', 'color: hsl(30,100%,50%);', ['color']),
('color: rgba(255,0,0,0.4);', 'color: rgba(255,0,0,0.4);', ['color']),
("text-overflow: ',' ellipsis;", "text-overflow: ',' ellipsis;",
['text-overflow']),
('text-overflow: "," ellipsis;', 'text-overflow: "," ellipsis;',
['text-overflow']),
('font-family: "Arial";', 'font-family: "Arial";', ['font-family']),
)
p_single = '<p style="%s">bar</p>'
p_double = "<p style='%s'>bar</p>"
def check(i, o, s):
if '"' in i:
eq_(p_double % o, clean(p_double % i, styles=s))
else:
eq_(p_single % o, clean(p_single % i, styles=s))
for i, o, s in tests:
yield check, i, o, s
def test_valid_css():
"""The sanitizer should fix missing CSS values."""
styles = ['color', 'float']
eq_('<p style="float: left;">foo</p>',
clean('<p style="float: left; color: ">foo</p>', styles=styles))
eq_('<p style="">foo</p>',
clean('<p style="color: float: left;">foo</p>', styles=styles))
def test_style_hang():
"""The sanitizer should not hang on any inline styles"""
# TODO: Neaten this up. It's copypasta from MDN/Kuma to repro the bug
style = ("""margin-top: 0px; margin-right: 0px; margin-bottom: 1.286em; """
"""margin-left: 0px; padding-top: 15px; padding-right: 15px; """
"""padding-bottom: 15px; padding-left: 15px; border-top-width: """
"""1px; border-right-width: 1px; border-bottom-width: 1px; """
"""border-left-width: 1px; border-top-style: dotted; """
"""border-right-style: dotted; border-bottom-style: dotted; """
"""border-left-style: dotted; border-top-color: rgb(203, 200, """
"""185); border-right-color: rgb(203, 200, 185); """
"""border-bottom-color: rgb(203, 200, 185); border-left-color: """
"""rgb(203, 200, 185); background-image: initial; """
"""background-attachment: initial; background-origin: initial; """
"""background-clip: initial; background-color: """
"""rgb(246, 246, 242); overflow-x: auto; overflow-y: auto; """
"""font: normal normal normal 100%/normal 'Courier New', """
"""'Andale Mono', monospace; background-position: initial """
"""initial; background-repeat: initial initial;""")
html = '<p style="%s">Hello world</p>' % style
styles = [
'border', 'float', 'overflow', 'min-height', 'vertical-align',
'white-space',
'margin', 'margin-left', 'margin-top', 'margin-bottom', 'margin-right',
'padding', 'padding-left', 'padding-top', 'padding-bottom', 'padding-right',
'background',
'background-color',
'font', 'font-size', 'font-weight', 'text-align', 'text-transform',
]
expected = ("""<p style="margin-top: 0px; margin-right: 0px; """
"""margin-bottom: 1.286em; margin-left: 0px; padding-top: """
"""15px; padding-right: 15px; padding-bottom: 15px; """
"""padding-left: 15px; background-color: """
"""rgb(246, 246, 242); font: normal normal normal """
"""100%/normal 'Courier New', 'Andale Mono', monospace;">"""
"""Hello world</p>""")
result = clean(html, styles=styles)
eq_(expected, result)
|
{
"content_hash": "8af66843eb212e05794e2301496432cc",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 84,
"avg_line_length": 45.1505376344086,
"alnum_prop": 0.5398904501071684,
"repo_name": "kiawin/bleach",
"id": "588c8ce154cf83292c37f5c475efcb958863dbff",
"size": "4199",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "bleach/tests/test_css.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "58030"
}
],
"symlink_target": ""
}
|
"""websocket cmd client for wssrv.py example."""
import argparse
import base64
import hashlib
import os
import signal
import sys
import asyncio
try:
import selectors
except ImportError:
from asyncio import selectors
import aiohttp
from aiohttp import websocket
WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
def start_client(loop, url):
name = input('Please enter your name: ').encode()
sec_key = base64.b64encode(os.urandom(16))
# send request
response = yield from aiohttp.request(
'get', url,
headers={
'UPGRADE': 'WebSocket',
'CONNECTION': 'Upgrade',
'SEC-WEBSOCKET-VERSION': '13',
'SEC-WEBSOCKET-KEY': sec_key.decode(),
})
# websocket handshake
if response.status != 101:
raise ValueError("Handshake error: Invalid response status")
if response.headers.get('upgrade', '').lower() != 'websocket':
raise ValueError("Handshake error - Invalid upgrade header")
if response.headers.get('connection', '').lower() != 'upgrade':
raise ValueError("Handshake error - Invalid connection header")
key = response.headers.get('sec-websocket-accept', '').encode()
match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest())
if key != match:
raise ValueError("Handshake error - Invalid challenge response")
# switch to websocket protocol
connection = response.connection
stream = connection.reader.set_parser(websocket.WebSocketParser)
writer = websocket.WebSocketWriter(connection.writer)
# input reader
def stdin_callback():
line = sys.stdin.buffer.readline()
if not line:
loop.stop()
else:
writer.send(name + b': ' + line)
loop.add_reader(sys.stdin.fileno(), stdin_callback)
@asyncio.coroutine
def dispatch():
while True:
try:
msg = yield from stream.read()
except:
# server disconnected
break
if msg.tp == websocket.MSG_PING:
writer.pong()
elif msg.tp == websocket.MSG_TEXT:
print(msg.data.strip())
elif msg.tp == websocket.MSG_CLOSE:
break
yield from dispatch()
ARGS = argparse.ArgumentParser(
description="websocket console client for wssrv.py example.")
ARGS.add_argument(
'--host', action="store", dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action="store", dest='port',
default=8080, type=int, help='Port number')
if __name__ == '__main__':
args = ARGS.parse_args()
if ':' in args.host:
args.host, port = args.host.split(':', 1)
args.port = int(port)
url = 'http://{}:{}'.format(args.host, args.port)
loop = asyncio.SelectorEventLoop(selectors.SelectSelector())
asyncio.set_event_loop(loop)
loop.add_signal_handler(signal.SIGINT, loop.stop)
asyncio.Task(start_client(loop, url))
loop.run_forever()
|
{
"content_hash": "d957474933a0367b0a716f2131e58acb",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 72,
"avg_line_length": 29.201923076923077,
"alnum_prop": 0.6187026671056964,
"repo_name": "saghul/aiohttp",
"id": "3a75a28fc92766651ff4b2726260962fc4866bd4",
"size": "3060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/wsclient.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7854"
},
{
"name": "Python",
"bytes": "629196"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
}
|
"""Minor cleanup
Revision ID: 182eb89ec642
Revises: 2614c3bbec2a
Create Date: 2014-11-19 15:09:45.727348
"""
# revision identifiers, used by Alembic.
import base64
import uuid
revision = '182eb89ec642'
down_revision = '2614c3bbec2a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
def upgrade():
# Generate a PID for those Specs that don't have one.
connection = op.get_bind()
ids = connection.execute("SELECT id FROM Specs WHERE pid IS NULL")
for id, in ids:
# Generate a not-too-long unique and permanent id.
uid = base64.urlsafe_b64encode(uuid.uuid4().bytes[0:15])
connection.execute(text("UPDATE Specs SET pid=:pid WHERE id=:id"), pid=uid, id=id)
### commands auto generated by Alembic - please adjust! ###
try:
op.drop_column('Apps', u'spec_url')
op.alter_column('Specs', 'pid',
existing_type=sa.VARCHAR(length=60),
nullable=False)
except:
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('Specs', 'pid',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.add_column('Apps', sa.Column(u'spec_url', sa.VARCHAR(length=600), nullable=True))
### end Alembic commands ###
|
{
"content_hash": "eaee0f022847e62b5d9abdd44503eb23",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 90,
"avg_line_length": 26.647058823529413,
"alnum_prop": 0.6401766004415012,
"repo_name": "morelab/appcomposer",
"id": "081ed3a87d469f750e955f8b6c838cd95ac97df6",
"size": "1359",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "alembic/versions/182eb89ec642_minor_cleanup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "16023"
},
{
"name": "HTML",
"bytes": "116481"
},
{
"name": "JavaScript",
"bytes": "164929"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "552296"
},
{
"name": "Shell",
"bytes": "1436"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class CustomAttributesTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
sphere = IECoreScene.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"ball1" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
},
"ball2" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
},
},
} )
)
a = GafferScene.CustomAttributes()
a["in"].setInput( input["out"] )
# should be no attributes until we've specified any
self.assertEqual( a["out"].attributes( "/" ), IECore.CompoundObject() )
self.assertEqual( a["out"].attributes( "/ball1" ), IECore.CompoundObject() )
self.assertEqual( a["out"].attributes( "/ball2" ), IECore.CompoundObject() )
# when we specify some, they should be applied to everything because
# we haven't specified a filter yet - but not to the root, because the
# root isn't allowed to have attributes.
a["attributes"].addChild( Gaffer.NameValuePlug( "ri:shadingRate", IECore.FloatData( 0.25 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertEqual( a["out"].attributes( "/" ), IECore.CompoundObject() )
self.assertEqual( a["out"].attributes( "/ball1" ), IECore.CompoundObject( { "ri:shadingRate" : IECore.FloatData( 0.25 ) } ) )
self.assertEqual( a["out"].attributes( "/ball2" ), IECore.CompoundObject( { "ri:shadingRate" : IECore.FloatData( 0.25 ) } ) )
# finally once we've applied a filter, we should get some attributes.
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/ball1" ] ) )
a["filter"].setInput( f["out"] )
self.assertEqual( a["out"].attributes( "/" ), IECore.CompoundObject() )
self.assertEqual( a["out"].attributes( "/ball1" ), IECore.CompoundObject( { "ri:shadingRate" : IECore.FloatData( 0.25 ) } ) )
self.assertEqual( a["out"].attributes( "/ball2" ), IECore.CompoundObject() )
def testOverrideAttributes( self ) :
sphere = IECoreScene.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"ball1" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
},
},
} )
)
a = GafferScene.CustomAttributes()
a["in"].setInput( input["out"] )
a["attributes"].addChild( Gaffer.NameValuePlug( "ri:shadingRate", IECore.FloatData( 0.25 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
a["attributes"].addChild( Gaffer.NameValuePlug( "user:something", IECore.IntData( 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertEqual(
a["out"].attributes( "/ball1" ),
IECore.CompoundObject( {
"ri:shadingRate" : IECore.FloatData( 0.25 ),
"user:something" : IECore.IntData( 1 ),
} )
)
a2 = GafferScene.CustomAttributes()
a2["in"].setInput( a["out"] )
self.assertEqual(
a2["out"].attributes( "/ball1" ),
IECore.CompoundObject( {
"ri:shadingRate" : IECore.FloatData( 0.25 ),
"user:something" : IECore.IntData( 1 ),
} )
)
a2["attributes"].addChild( Gaffer.NameValuePlug( "ri:shadingRate", IECore.FloatData( .5 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
a2["attributes"].addChild( Gaffer.NameValuePlug( "user:somethingElse", IECore.IntData( 10 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertEqual(
a2["out"].attributes( "/ball1" ),
IECore.CompoundObject( {
"ri:shadingRate" : IECore.FloatData( 0.5 ),
"user:something" : IECore.IntData( 1 ),
"user:somethingElse" : IECore.IntData( 10 ),
} )
)
def testHashPassThrough( self ) :
sphere = IECoreScene.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"ball1" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
},
"ball2" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
},
},
} )
)
a = GafferScene.CustomAttributes()
a["in"].setInput( input["out"] )
# when we have no attributes at all, everything should be a pass-through
self.assertSceneHashesEqual( input["out"], a["out"] )
# when we have some attributes, everything except the attributes plug should
# be a pass-through.
a["attributes"].addChild( Gaffer.NameValuePlug( "ri:shadingRate", IECore.FloatData( 2.0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertSceneHashesEqual( input["out"], a["out"], checks = self.allSceneChecks - { "attributes" } )
self.assertSceneHashesNotEqual( input["out"], a["out"], checks = { "attributes" } )
# when we add a filter, non-matching objects should become pass-throughs
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/ball1" ] ) )
a["filter"].setInput( f["out"] )
self.assertSceneHashesEqual( input["out"], a["out"], pathsToIgnore = ( "/ball1", ) )
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( [ "ball1" ] )
with c :
self.assertEqual( a["out"]["childNames"].hash(), input["out"]["childNames"].hash() )
self.assertEqual( a["out"]["transform"].hash(), input["out"]["transform"].hash() )
self.assertEqual( a["out"]["bound"].hash(), input["out"]["bound"].hash() )
self.assertEqual( a["out"]["object"].hash(), input["out"]["object"].hash() )
self.assertNotEqual( a["out"]["attributes"].hash(), input["out"]["attributes"].hash() )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferScene.CustomAttributes()
s["a"]["attributes"].addChild( Gaffer.NameValuePlug( "ri:shadingRate", IECore.FloatData( 1.0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["a"]["attributes"] ), 1 )
self.assertTrue( "attributes1" not in s2["a"] )
def testBoxPromotion( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferScene.StandardAttributes()
s["a"]["attributes"]["deformationBlur"]["enabled"].setValue( True )
s["a"]["attributes"]["deformationBlur"]["value"].setValue( False )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["a"] ] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["a"]["attributes"]["deformationBlur"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( b["a"]["attributes"]["deformationBlur"] ) )
p = Gaffer.PlugAlgo.promote( b["a"]["attributes"]["deformationBlur"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["a"]["attributes"]["deformationBlur"] ) )
self.assertTrue( b["a"]["attributes"]["deformationBlur"].getInput().isSame( p ) )
self.assertTrue( b["a"]["attributes"]["deformationBlur"]["name"].getInput().isSame( p["name"] ) )
self.assertTrue( b["a"]["attributes"]["deformationBlur"]["enabled"].getInput().isSame( p["enabled"] ) )
self.assertTrue( b["a"]["attributes"]["deformationBlur"]["value"].getInput().isSame( p["value"] ) )
self.assertEqual( p["enabled"].getValue(), True )
self.assertEqual( p["value"].getValue(), False )
def testDisconnectDoesntRetainFilterValue( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["f"] = GafferScene.PathFilter()
s["a"] = GafferScene.CustomAttributes()
s["a"]["attributes"].addChild( Gaffer.NameValuePlug( "user:test", IECore.IntData( 10 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertTrue( "user:test" in s["a"]["out"].attributes( "/plane" ) )
s["a"]["filter"].setInput( s["f"]["out"] )
self.assertFalse( "user:test" in s["a"]["out"].attributes( "/plane" ) )
s["a"]["filter"].setInput( None )
self.assertTrue( "user:test" in s["a"]["out"].attributes( "/plane" ) )
def testCopyPasteDoesntRetainFilterValue( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["f"] = GafferScene.PathFilter()
s["a"] = GafferScene.CustomAttributes()
s["a"]["attributes"].addChild( Gaffer.NameValuePlug( "user:test", IECore.IntData( 10 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertTrue( "user:test" in s["a"]["out"].attributes( "/plane" ) )
s["a"]["filter"].setInput( s["f"]["out"] )
self.assertFalse( "user:test" in s["a"]["out"].attributes( "/plane" ) )
ss = s.serialise( filter = Gaffer.StandardSet( [ s["p"], s["a"] ] ) )
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( "f" not in s )
self.assertTrue( "user:test" in s["a"]["out"].attributes( "/plane" ) )
def testOutPlugNotSerialised( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferScene.CustomAttributes()
ss = s.serialise()
self.assertFalse( "out" in ss )
def testAffects( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferScene.CustomAttributes()
p = Gaffer.NameValuePlug( "user:test", 10, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["a"]["attributes"].addChild( p )
self.assertEqual( set( s["a"].affects( p["value"] ) ), set( [ s["a"]["out"]["attributes"] ] ) )
self.assertEqual( set( s["a"].affects( s["a"]["extraAttributes"] ) ), set( [ s["a"]["out"]["attributes"] ] ) )
s["a"]["global"].setValue( True )
self.assertEqual( set( s["a"].affects( p["value"] ) ), set( [ s["a"]["out"]["globals"] ] ) )
self.assertEqual( set( s["a"].affects( s["a"]["extraAttributes"] ) ), set( [ s["a"]["out"]["globals"] ] ) )
s["e"] = Gaffer.Expression()
s["e"].setExpression( """parent["a"]["global"] = context.getFrame() > 10""" )
self.assertEqual( set( s["a"].affects( p["value"] ) ), set( [ s["a"]["out"]["attributes"], s["a"]["out"]["globals"] ] ) )
def testExtraAttributes( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
s["a"] = GafferScene.CustomAttributes()
s["f"] = GafferScene.PathFilter()
s["f"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
s["a"]["filter"].setInput( s["f"]["out"] )
s["a"]["extraAttributes"].setValue(IECore.CompoundData({
"a1" : IECore.StringData( "from extra" ),
"a2" : IECore.IntData( 2 ),
}))
s["a"]["attributes"].addChild(
Gaffer.NameValuePlug( "a1", IECore.StringData( "from attributes" ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
)
s["a"]["attributes"].addChild(
Gaffer.NameValuePlug( "a3", IECore.IntData( 5 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
)
self.assertEqual(
s["a"]["out"].attributes( "/sphere" ),
IECore.CompoundObject( {
"a1" : IECore.StringData( "from extra" ),
"a2" : IECore.IntData( 2 ),
"a3" : IECore.IntData( 5 ),
} )
)
def testExtraAttributesOnlyEvaluatedForFilteredLocations( self ) :
script = Gaffer.ScriptNode()
script["grid"] = GafferScene.Grid()
script["filter"] = GafferScene.PathFilter()
script["filter"]["paths"].setValue( IECore.StringVectorData( [ "/grid" ] ) )
script["customAttributes"] = GafferScene.CustomAttributes()
script["customAttributes"]["in"].setInput( script["grid"]["out"] )
script["customAttributes"]["filter"].setInput( script["filter"]["out"] )
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( """parent["customAttributes"]["extraAttributes"] = IECore.CompoundData( { "a" : IECore.StringData( str( context.get( "scene:path" ) ) ) } )""" )
with Gaffer.ContextMonitor( script["expression"] ) as monitor :
GafferSceneTest.traverseScene( script["customAttributes"]["out"] )
self.assertEqual( monitor.combinedStatistics().numUniqueValues( "scene:path" ), 1 )
def testDirtyPropagation( self ) :
attributes = GafferScene.CustomAttributes()
cs = GafferTest.CapturingSlot( attributes.plugDirtiedSignal() )
# Adding or removing an attribute should dirty `out.attributes`
attributes["attributes"].addChild(
Gaffer.NameValuePlug( "test", 10, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
)
self.assertIn( attributes["out"]["attributes"], { x[0] for x in cs } )
del cs[:]
del attributes["attributes"][0]
self.assertIn( attributes["out"]["attributes"], { x[0] for x in cs } )
# And although the Dynamic flag is currently required for proper serialisation
# of CustomAttributes nodes, its absence shouldn't prevent dirty propagation.
# We hope to be able to remove the Dynamic flag completely in the future.
del cs[:]
attributes["attributes"].addChild( Gaffer.NameValuePlug( "test2", 10 ) )
self.assertIn( attributes["out"]["attributes"], { x[0] for x in cs } )
del cs[:]
del attributes["attributes"][0]
self.assertIn( attributes["out"]["attributes"], { x[0] for x in cs } )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "5f3e6dfba8cd5fe009c50b5d8c377cb8",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 182,
"avg_line_length": 38.519287833827896,
"alnum_prop": 0.653955781526847,
"repo_name": "boberfly/gaffer",
"id": "b1ce641a01854a3ea8315e32ed383fcec896ad94",
"size": "14841",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/CustomAttributesTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
}
|
"""Contains TF-Slim code for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments. Note that the training loop uses the
tf.train.Supervisor and its managed_session in its implementation to ensure the
ability of worker processes to recover from failures.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run training.
slim.learning.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to train, TF-Slim's train loop needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. slim.learning.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
gradient_multipliers=gradient_multipliers)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. slim.learning.create_train_op allows
a user to pass in a list of update_ops to call along with the gradient updates.
train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)
By default, slim.learning.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's
slim.batch_norm function adds the moving mean and moving variance updates to
this collection. Consequently, users who want to use slim.batch_norm will not
need to take any additional steps in order to have the moving mean and moving
variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force TF-Slim NOT to use ANY update_ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use an alternative set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = slim.learning.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
TF-Slim provides a convenient mechanism for doing so:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = slim.get_model_variables()
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),
'name_var_1_in_checkpoint': slim.get_unique_variable('var1')
}
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = slim.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["conv"])
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values from an arbitrary
source (a text document, matlab file, etc). While this is technically feasible
using plain TensorFlow, it also results in the values of your weights being
stored in the graph. For large models, this becomes prohibitively large. TF-Slim
allows you to perform this initial assignment without having to store the values
of the initial model in the graph itself by using placeholders and a feed
dictionary:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'add_gradients_summaries', 'clip_gradient_norms', 'multiply_gradients',
'create_train_op', 'train_step', 'train'
]
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
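Example (a minimal sketch; `loss`, `optimizer` and the variable op name
'conv1/weights' stand in for whatever exists in the surrounding graph):
grads_and_vars = optimizer.compute_gradients(loss)
gradient_multipliers = {'conv1/weights': 2.0}
grads_and_vars = multiply_gradients(grads_and_vars, gradient_multipliers)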
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
multiplier = gradient_multipliers[key]
if not isinstance(multiplier, ops.Tensor):
multiplier = constant_op.constant(multiplier, dtype=grad.dtype)
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * multiplier
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '/gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '/gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
clip_gradient_norm=0,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
gradient_multipliers=None,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then slim.variables.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
a warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.trainable_variables().
clip_gradient_norm: If greater than 0 then the gradients would be clipped
by it.
summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
gradient_multipliers: A dictionary of either `Variables` or `Variable` op
names to the coefficient by which the associated gradient should be
scaled.
check_numerics: Whether or not we apply check_numerics.
Returns:
A `Tensor` that when evaluated, computes the gradients and returns the total
loss value.
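Example (a minimal sketch; `total_loss` is assumed to be built by the
surrounding model code):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer, clip_gradient_norm=4.0, summarize_gradients=True)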
"""
def transform_grads_fn(grads):
if gradient_multipliers:
with ops.name_scope('multiply_grads'):
grads = multiply_gradients(grads, gradient_multipliers)
# Clip gradients.
if clip_gradient_norm > 0:
with ops.name_scope('clip_grads'):
grads = clip_gradient_norms(grads, clip_gradient_norm)
return grads
return training.create_train_op(
total_loss=total_loss,
optimizer=optimizer,
global_step=global_step,
update_ops=update_ops,
variables_to_train=variables_to_train,
transform_grads_fn=transform_grads_fn,
summarize_gradients=summarize_gradients,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
check_numerics=check_numerics)
def _wait_for_step(sess, global_step, step):
"""Wait till the global step has reached at least 'step'.
Args:
sess: A session.
global_step: A Tensor.
step: Int. The global step to reach.
"""
while True:
if training_util.global_step(sess, global_step) >= step:
break
time.sleep(1.0)
def train_step(sess, train_op, global_step, train_step_kwargs):
"""Function that takes a gradient step and specifies whether to stop.
Args:
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the
total loss.
global_step: A `Tensor` representing the global training step.
train_step_kwargs: A dictionary of keyword arguments.
Returns:
The total loss and a boolean indicating whether or not to stop training.
Raises:
ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
"""
start_time = time.time()
trace_run_options = None
run_metadata = None
if 'should_trace' in train_step_kwargs:
if 'logdir' not in train_step_kwargs:
raise ValueError('logdir must be present in train_step_kwargs when '
'should_trace is present')
if sess.run(train_step_kwargs['should_trace']):
trace_run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
total_loss, np_global_step = sess.run([train_op, global_step],
options=trace_run_options,
run_metadata=run_metadata)
time_elapsed = time.time() - start_time
if run_metadata is not None:
tl = timeline.Timeline(run_metadata.step_stats)
trace = tl.generate_chrome_trace_format()
trace_filename = os.path.join(train_step_kwargs['logdir'],
'tf_trace-%d.json' % np_global_step)
logging.info('Writing trace to %s', trace_filename)
file_io.write_string_to_file(trace_filename, trace)
if 'summary_writer' in train_step_kwargs:
train_step_kwargs['summary_writer'].add_run_metadata(run_metadata,
'run_metadata-%d' %
np_global_step)
if 'should_log' in train_step_kwargs:
if sess.run(train_step_kwargs['should_log']):
logging.info('global step %d: loss = %.4f (%.3f sec/step)',
np_global_step, total_loss, time_elapsed)
# TODO(nsilberman): figure out why we can't put this into sess.run. The
# issue right now is that the stop check depends on the global step. The
# increment of global step often happens via the train op, which is typically
# created using optimizer.apply_gradients.
#
# Since running `train_op` causes the global step to be incremented, one
# would expect that using a control dependency would allow the
# should_stop check to be run in the same session.run call:
#
# with ops.control_dependencies([train_op]):
# should_stop_op = ...
#
# However, this actually seems not to work on certain platforms.
if 'should_stop' in train_step_kwargs:
should_stop = sess.run(train_step_kwargs['should_stop'])
else:
should_stop = False
return total_loss, should_stop
_USE_DEFAULT = 0
def train(train_op,
logdir,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
train_step_fn: The function to call in order to execute a single gradient
step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
as measured by 'global_step': training will stop once 'global_step' is
greater than or equal to 'number_of_steps'. If the value is left as None, training
proceeds indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling `tf.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.local_variables_initializer()` and `tf.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None`
to indicate that no summaries should be written. If unset, we
create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created
and used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.train.SyncReplicasOptimizer. If the
argument is supplied, gradient updates will be synchronous. If left as
`None`, gradient updates will be asynchronous.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
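Example (a minimal sketch; `train_op` is assumed to come from
`slim.learning.create_train_op` and '/tmp/model_logs' is just a placeholder
log directory):
slim.learning.train(train_op, '/tmp/model_logs', number_of_steps=1000, save_summaries_secs=60)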
"""
if train_op is None:
raise ValueError('train_op cannot be None.')
if logdir is None:
if summary_op != _USE_DEFAULT:
raise ValueError('Cannot provide summary_op because logdir=None')
if saver is not None:
raise ValueError('Cannot provide saver because logdir=None')
if trace_every_n_steps is not None:
raise ValueError('Cannot provide trace_every_n_steps because '
'logdir=None')
if sync_optimizer is not None and startup_delay_steps > 0:
raise ValueError(
'startup_delay_steps must be zero when sync_optimizer is supplied.')
if number_of_steps is not None and number_of_steps <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
graph = graph or ops.get_default_graph()
with graph.as_default():
if global_step is None:
global_step = variables.get_or_create_global_step()
saver = saver or tf_saver.Saver()
with ops.name_scope('init_ops'):
if init_op == _USE_DEFAULT:
init_op = tf_variables.global_variables_initializer()
if ready_op == _USE_DEFAULT:
ready_op = tf_variables.report_uninitialized_variables()
if local_init_op == _USE_DEFAULT:
local_init_op = control_flow_ops.group(
tf_variables.local_variables_initializer(),
lookup_ops.tables_initializer())
if sync_optimizer is not None and isinstance(
sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
with ops.control_dependencies([local_init_op] if local_init_op is
not None else []):
if is_chief:
local_init_op = sync_optimizer.chief_init_op
else:
local_init_op = sync_optimizer.local_step_init_op
ready_for_local_init_op = sync_optimizer.ready_for_local_init_op
else:
ready_for_local_init_op = None
if summary_op == _USE_DEFAULT:
summary_op = summary.merge_all()
if summary_writer == _USE_DEFAULT:
summary_writer = supervisor.Supervisor.USE_DEFAULT
if is_chief and sync_optimizer is not None:
if not isinstance(sync_optimizer,
(sync_replicas_optimizer.SyncReplicasOptimizer)):
raise ValueError(
'`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')
# Need to create these BEFORE the supervisor finalizes the graph:
init_tokens_op = sync_optimizer.get_init_tokens_op()
chief_queue_runner = sync_optimizer.get_chief_queue_runner()
if train_step_kwargs == _USE_DEFAULT:
with ops.name_scope('train_step'):
train_step_kwargs = {}
if number_of_steps:
should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
else:
should_stop_op = constant_op.constant(False)
train_step_kwargs['should_stop'] = should_stop_op
train_step_kwargs['should_log'] = math_ops.equal(
math_ops.mod(global_step, log_every_n_steps), 0)
if is_chief and trace_every_n_steps is not None:
train_step_kwargs['should_trace'] = math_ops.equal(
math_ops.mod(global_step, trace_every_n_steps), 0)
train_step_kwargs['logdir'] = logdir
sv = supervisor.Supervisor(
graph=graph,
is_chief=is_chief,
logdir=logdir,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
ready_op=ready_op,
summary_op=summary_op,
summary_writer=summary_writer,
global_step=global_step,
saver=saver,
save_summaries_secs=save_summaries_secs,
save_model_secs=save_interval_secs,
init_fn=init_fn)
if summary_writer is not None:
train_step_kwargs['summary_writer'] = sv.summary_writer
should_retry = True
while should_retry:
try:
should_retry = False
with sv.managed_session(
master, start_standard_services=False, config=session_config) as sess:
logging.info('Starting Session.')
if is_chief:
if logdir:
sv.start_standard_services(sess)
elif startup_delay_steps > 0:
_wait_for_step(sess, global_step,
min(startup_delay_steps, number_of_steps or
sys.maxsize))
sv.start_queue_runners(sess)
logging.info('Starting Queues.')
if is_chief and sync_optimizer is not None:
sv.start_queue_runners(sess, [chief_queue_runner])
sess.run(init_tokens_op)
try:
while not sv.should_stop():
total_loss, should_stop = train_step_fn(
sess, train_op, global_step, train_step_kwargs)
if should_stop:
logging.info('Stopping Training.')
break
except errors.OutOfRangeError:
# OutOfRangeError is thrown when epoch limit per
# tf.train.limit_epochs is reached.
logging.info('Caught OutOfRangeError. Stopping Training.')
if logdir and sv.is_chief:
logging.info('Finished training! Saving model to disk.')
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
logging.info('Retrying training!')
should_retry = True
return total_loss
|
from srttimeadjuster import main
if __name__ == '__main__':
main.main()
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
# makemessages scans the current working directory and writes in the
# locale subdirectory. There aren't any options to control this. As a
# consequence tests can't run in parallel. Since i18n tests run in less
# than 4 seconds, serializing them with SerializeMixin is acceptable.
lockfile = __file__
test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
stdout=out, **options)
output = out.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
q = '"'
if use_quotes:
expected_value = '"%s"' % expected_value
q = "'"
needle = '%s %s' % (keyword, expected_value)
expected_value = re.escape(expected_value)
return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertMsgId(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)
def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)
def assertMsgStr(self, msgstr, haystack, use_quotes=True):
return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
parts = ['#: ']
path = os.path.join(cwd_prefix, *comment_parts)
parts.append(path)
if isinstance(line_number, six.string_types):
line_number = self._get_token_line_number(path, line_number)
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
if assert_presence:
return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
else:
return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def _get_token_line_number(self, path, token):
with open(path) as f:
for line, content in enumerate(f, 1):
if token in force_text(content):
return line
self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
"""
self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
verifies that the django.po file has a gettext-style location comment of the form
`#: dirA/dirB/foo.py:42`
(or `#: .\dirA\dirB\foo.py:42` on Windows)
None can be passed for the line_number argument to skip checking of
the :42 suffix part.
A string token can also be passed as line_number, in which case it
will be searched in the template, and its line number will be used.
A msgid is a suitable candidate.
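For example, the call below (as used by the tests later in this file) looks
up the msgid's line number in templates/test.html:
self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')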
"""
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
"""Check the opposite of assertLocationComment()"""
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
def assertRecentlyModified(self, path):
"""
Assert that file was recently modified (modification time was less than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertLess(delta, 10, "%s wasn't recently modified" % path)
def assertNotRecentlyModified(self, path):
"""
Assert that file was not recently modified (modification time was more than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertGreater(delta, 10, "%s was recently modified" % path)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertNotIn('This comment should not be extracted', po_contents)
# Comments in templates
self.assertIn('#. Translators: This comment should be extracted', po_contents)
self.assertIn("#. Translators: Django comment block for translators\n#. string's meaning unveiled", po_contents)
self.assertIn('#. Translators: One-line translator comment #1', po_contents)
self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #2', po_contents)
self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #3', po_contents)
self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #4', po_contents)
self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö', po_contents)
self.assertIn('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.', po_contents)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("Get my line number", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
self.assertTrue(MakeMessagesCommand.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
with self.assertRaises(SyntaxError) as context_manager:
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
six.assertRegex(
self, str(context_manager.exception),
r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Special trans context #1"', po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertIn('msgctxt "Special trans context #2"', po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertIn('msgctxt "Special trans context #3"', po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% trans %} with a filter
for minor_part in 'abcdefgh': # Iterate from #7.1a to #7.1h template markers
self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertIn("Translatable literal #8b-plural", po_contents)
self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertIn("Translatable literal #8c-plural", po_contents)
self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
def test_makemessages_find_files(self):
"""
Test that find_files only discovers files having the proper extensions.
"""
cmd = MakeMessagesCommand()
cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
cmd.symlinks = False
cmd.domain = 'django'
cmd.extensions = ['html', 'txt', 'py']
cmd.verbosity = 0
cmd.locale_paths = []
cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
cmd.extensions = ['js']
cmd.domain = 'djangojs'
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.js'}), set())
@mock.patch('django.core.management.commands.makemessages.popen_wrapper')
def test_makemessages_gettext_version(self, mocked_popen_wrapper):
# "Normal" output:
mocked_popen_wrapper.return_value = (
"xgettext (GNU gettext-tools) 0.18.1\n"
"Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
"License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
"This is free software: you are free to change and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.\n"
"Written by Ulrich Drepper.\n", '', 0)
cmd = MakeMessagesCommand()
self.assertEqual(cmd.gettext_version, (0, 18, 1))
# Version number with only 2 parts (#23788)
mocked_popen_wrapper.return_value = (
"xgettext (GNU gettext-tools) 0.17\n", '', 0)
cmd = MakeMessagesCommand()
self.assertEqual(cmd.gettext_version, (0, 17))
# Bad version output
mocked_popen_wrapper.return_value = (
"any other return value\n", '', 0)
cmd = MakeMessagesCommand()
with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
cmd.gettext_version
def test_po_file_encoding_when_updating(self):
"""Update of PO file doesn't corrupt it with non-UTF-8 encoding on Python3+Windows (#23271)"""
BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
os.chdir(self.test_dir)
shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
po_contents = force_text(fp.read())
self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('gettext_noop should, too.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
"""
Regression test for #23583.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
@override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
def test_default_root_settings(self):
"""
Regression test for #23717.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_directory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
os.path.join('ignore_dir', '*'),
])
self.assertIn("ignoring directory ignore_dir", out)
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
def test_ignore_subdirectory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'templates/*/ignore.html',
'templates/subdir/*',
])
self.assertIn("ignoring directory subdir", out)
self.assertNotMsgId('This subdir should be ignored too.', po_contents)
def test_ignore_file_patterns(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'xxx_*',
])
self.assertIn("ignoring file xxx_ignored.html", out)
self.assertNotMsgId('This should be ignored too.', po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
out, _ = self._run_makemessages()
self.assertIn("ignoring directory static", out)
self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
super(SymlinkExtractorTests, self).setUp()
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
# On Python >= 3.2 os.symlink() always exists, but it can still
# fail at runtime when the user doesn't have the needed permissions on
# Windows versions that support symbolic links (>= 6/Vista).
# See Python issue 9333 (http://bugs.python.org/issue9333).
# Skip the test in that case.
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
def test_trans_and_plural_blocktrans_collision(self):
"""
Ensures a correct workaround for the gettext bug when handling a literal
found inside a {% trans %} tag and also in another file inside a
{% blocktrans %} with a plural (#17375).
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertNotIn("#-#-#-#-# django.pot (PACKAGE VERSION) #-#-#-#-#\\n", po_contents)
self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
# #16903 -- Standard comment with source file relative path should be present
self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
# #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE = 'locale/django.pot'
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
super(MultipleLocaleExtractionTests, self).tearDown()
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
LOCALES = ['en', 'fr', 'it']
PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
def _set_times_for_all_po_files(self):
"""
Set access and modification times to the Unix epoch time for all the .po files.
"""
for locale in self.LOCALES:
os.utime(self.PO_FILE % locale, (0, 0))
def setUp(self):
super(ExcludedLocaleExtractionTests, self).setUp()
os.chdir(self.test_dir) # ExtractorTests.tearDown() takes care of restoring.
shutil.copytree('canned_locale', 'locale')
self._set_times_for_all_po_files()
self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'makemessages'])
def test_one_locale_excluded(self):
management.call_command('makemessages', exclude=['it'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded(self):
management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_one_locale_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
def setUp(self):
super(CustomLayoutExtractionTests, self).setUp()
self.test_dir = os.path.join(this_directory, 'project_dir')
def test_no_locale_raises(self):
os.chdir(self.test_dir)
with six.assertRaisesRegex(self, management.CommandError,
"Unable to find a locale path to store translations for file"):
management.call_command('makemessages', locale=LOCALE, verbosity=0)
@override_settings(
LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
)
def test_project_locale_paths(self):
"""
Test that:
* translations for an app containing a locale folder are stored in that folder
* translations outside of that app are in LOCALE_PATHS[0]
"""
os.chdir(self.test_dir)
self.addCleanup(shutil.rmtree,
os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
self.addCleanup(shutil.rmtree,
os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
project_de_locale = os.path.join(
self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
app_de_locale = os.path.join(
self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
self.assertTrue(os.path.exists(project_de_locale))
self.assertTrue(os.path.exists(app_de_locale))
with open(project_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has no locale directory', po_contents)
self.assertMsgId('This is a project-level string', po_contents)
with open(app_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has a locale directory', po_contents)
|
import ldap
import config
SCOPE = ldap.SCOPE_SUBTREE
class LdapApi:
def __init__(self, uri, nia=None, password=None, port="389"):
self.port = port
self.ldapCon = ldap.initialize(uri + ":" +self.port)
self.nia = nia
self.password = password
def search(self, dn, filt='(objectClass=*)', attrlist=None):
result = self.ldapCon.search_s(dn,SCOPE,filt,attrlist)
return result
def auth(self):
data = self.search(config.LDAP_DN, '(uid=*' + str(self.nia) + '*)', config.LDAP_FIELDS)
data = data[0][0]
try:
self.ldapCon.simple_bind_s(data, self.password)
return 0
except ldap.LDAPError:
return 1
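# Example usage (an illustrative sketch only; the server URI, uid and password
# below are hypothetical, while config.LDAP_DN and config.LDAP_FIELDS come from
# the local config module already imported above):
#
# api = LdapApi('ldap://ldap.example.org', nia='u123456', password='secret')
# if api.auth() == 0:
#     entries = api.search(config.LDAP_DN, '(uid=u123456)', config.LDAP_FIELDS)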
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("algorithms", "0012_auto_20210728_1019"),
]
operations = [
migrations.AlterModelOptions(
name="job",
options={
"ordering": ("created",),
"permissions": [("view_logs", "Can view the jobs logs")],
},
),
]
|
import os
import sys
import logging
import angr
import psutil
from common import bin_location
def test_memory_watcher():
binary = os.path.join(bin_location, 'tests', 'x86_64', 'veritesting_a')
proj = angr.Project(binary, auto_load_libs=False)
simgr = proj.factory.simulation_manager()
memory_watcher = angr.exploration_techniques.MemoryWatcher()
simgr.use_technique(memory_watcher)
# Initially build some paths
while len(simgr.active) < 32 and simgr.active != []:
simgr.step()
# Something else went wrong..
assert simgr.active != []
# Fake it so that the memory watcher believes we're too low on memory
memory_watcher.min_memory = psutil.virtual_memory().total
previous_active = len(simgr.active)
# Step once to move things over
simgr.step()
assert simgr.active == []
assert len(getattr(simgr, memory_watcher.memory_stash)) == previous_active
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.exploration_techniques.memory_watcher").setLevel('DEBUG')
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
import unittest
import os
import pprint
import dataclasses
import xml.etree.ElementTree as ET
# from vkxml2rs.elements import *
# from vkxml2rs.parse import *
from vkxml2rs import *
from modules.parse import *
class TestConverter(unittest.TestCase):
def test_parse_basetype(self):
xml = (
'<types>'
' <type category="basetype">typedef <type>uint32_t</type> <name>VkSampleMask</name>;</type>'
' <type category="basetype">typedef <type>uint32_t</type> <name>VkBool32</name>;</type>'
' <type category="basetype">typedef <type>uint32_t</type> <name>VkFlags</name>;</type>'
' <type category="basetype">typedef <type>uint64_t</type> <name>VkDeviceSize</name>;</type>'
' <type category="basetype">typedef <type>uint64_t</type> <name>VkDeviceAddress</name>;</type>'
'</types>'
)
root = ET.fromstring(xml)
children = list(root)
basetypes = []
for child in children:
basetypes.append(parse_basetype(child))
self.assertEqual(basetypes[0], BaseType('VkSampleMask', 'uint32_t'))
self.assertEqual(basetypes[1], BaseType('VkBool32', 'uint32_t'))
self.assertEqual(basetypes[2], BaseType('VkFlags', 'uint32_t'))
self.assertEqual(basetypes[3], BaseType('VkDeviceSize', 'uint64_t'))
self.assertEqual(basetypes[4], BaseType('VkDeviceAddress', 'uint64_t'))
def test_parse_handle(self):
xml = (
'<types>'
' <type category="handle"><type>VK_DEFINE_HANDLE</type>(<name>VkInstance</name>)</type>'
' <type category="handle" parent="VkInstance"><type>VK_DEFINE_HANDLE</type>(<name>VkPhysicalDevice</name>)</type>'
'</types>'
)
root = ET.fromstring(xml)
children = list(root)
handles = []
for child in children:
handles.append(parse_handle(child))
self.assertEqual(handles[0], Handle('VkInstance'))
self.assertEqual(handles[1], Handle('VkPhysicalDevice'))
def test_parse_funcpointer(self):
xml = (
'<types>\n'
'<type category="funcpointer">typedef VkBool32 (VKAPI_PTR *<name>PFN_vkDebugReportCallbackEXT</name>)(\n'
'<type>VkDebugReportFlagsEXT</type> flags,\n'
'<type>VkDebugReportObjectTypeEXT</type> objectType,\n'
'<type>uint64_t</type> object,\n'
'<type>size_t</type> location,\n'
'<type>int32_t</type> messageCode,\n'
'const <type>char</type>* pLayerPrefix,\n'
'const <type>char</type>* pMessage,\n'
'<type>void</type>* pUserData);</type>\n'
'<type category="funcpointer" requires="VkDebugUtilsMessengerCallbackDataEXT">typedef VkBool32 (VKAPI_PTR *<name>PFN_vkDebugUtilsMessengerCallbackEXT</name>)(\n'
'<type>VkDebugUtilsMessageSeverityFlagBitsEXT</type> messageSeverity,\n'
'<type>VkDebugUtilsMessageTypeFlagsEXT</type> messageTypes,\n'
'const <type>VkDebugUtilsMessengerCallbackDataEXT</type>* pCallbackData,\n'
'<type>void</type>* pUserData);</type>\n'
'</types>\n'
)
root = ET.fromstring(xml)
children = list(root)
funcpointers = []
for child in children:
funcpointers.append(parse_funcpointer(child))
self.assertEqual(
funcpointers[0],
FuncPointer(
'PFN_vkDebugReportCallbackEXT',
'VkBool32',
[
Param('VkDebugReportFlagsEXT', 'flags'),
Param('VkDebugReportObjectTypeEXT', 'objectType'),
Param('uint64_t', 'object'),
Param('size_t', 'location'),
Param('int32_t', 'messageCode'),
Param('const char *', 'pLayerPrefix'),
Param('const char *', 'pMessage'),
Param('void *', 'pUserData'),
]))
self.assertEqual(
funcpointers[1],
FuncPointer(
'PFN_vkDebugUtilsMessengerCallbackEXT',
'VkBool32',
[
Param('VkDebugUtilsMessageSeverityFlagBitsEXT',
'messageSeverity'),
Param('VkDebugUtilsMessageTypeFlagsEXT', 'messageTypes'),
Param('const VkDebugUtilsMessengerCallbackDataEXT *',
'pCallbackData'),
Param('void *', 'pUserData'),
]))
def test_parse_enums(self):
xml = (
'<registry>'
' <enums name="VkPerformanceCounterScopeKHR" type="enum">\n'
' <enum value="0" name="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR"/>\n'
' <enum value="1" name="VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR"/>\n'
' <enum value="2" name="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR"/>\n'
' <enum name="VK_QUERY_SCOPE_COMMAND_BUFFER_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR"/>\n'
' <enum name="VK_QUERY_SCOPE_RENDER_PASS_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR"/>\n'
' <enum name="VK_QUERY_SCOPE_COMMAND_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR"/>\n'
' </enums>\n'
' <comment>Flags</comment>\n'
' <enums name="VkQueueFlagBits" type="bitmask">\n'
' <enum bitpos="0" name="VK_QUEUE_GRAPHICS_BIT" comment="Queue supports graphics operations"/>\n'
' <enum bitpos="1" name="VK_QUEUE_COMPUTE_BIT" comment="Queue supports compute operations"/>\n'
' <enum bitpos="2" name="VK_QUEUE_TRANSFER_BIT" comment="Queue supports transfer operations"/>\n'
' <enum bitpos="3" name="VK_QUEUE_SPARSE_BINDING_BIT" comment="Queue supports sparse resource memory management operations"/>\n'
' </enums>\n'
'</registry>\n'
)
root = ET.fromstring(xml)
        children = list(root)
enums = []
for child in children:
if child.tag == 'enums':
enums.append(parse_enums(child))
self.assertEqual(enums[0], Enums('VkPerformanceCounterScopeKHR', [
Enum('VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR', 0),
Enum('VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR', 1),
Enum('VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR', 2),
Enum('VK_QUERY_SCOPE_COMMAND_BUFFER_KHR', 0),
Enum('VK_QUERY_SCOPE_RENDER_PASS_KHR', 1),
Enum('VK_QUERY_SCOPE_COMMAND_KHR', 2),
]))
self.assertEqual(enums[1], Enums('VkQueueFlagBits', [
Enum('VK_QUEUE_GRAPHICS_BIT', 1),
Enum('VK_QUEUE_COMPUTE_BIT', 2),
Enum('VK_QUEUE_TRANSFER_BIT', 4),
Enum('VK_QUEUE_SPARSE_BINDING_BIT', 8),
]))
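        # parse_enums is expected to turn bitpos="N" into the value 1 << N
        # (so bitpos="3" becomes 8), which is what the bitmask assertions
        # above check.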
def test_parse_struct(self):
xml = (
'<registry>'
' <type category="struct" name="VkApplicationInfo">\n'
' <member values="VK_STRUCTURE_TYPE_APPLICATION_INFO"><type>VkStructureType</type> <name>sType</name></member>\n'
' <member>const <type>void</type>* <name>pNext</name></member>\n'
' <member optional="true" len="null-terminated">const <type>char</type>* <name>pApplicationName</name></member>\n'
' <member><type>uint32_t</type> <name>applicationVersion</name></member>\n'
' <member optional="true" len="null-terminated">const <type>char</type>* <name>pEngineName</name></member>\n'
' <member><type>uint32_t</type> <name>engineVersion</name></member>\n'
' <member><type>uint32_t</type> <name>apiVersion</name></member>\n'
' </type>\n'
'</registry>\n'
)
root = ET.fromstring(xml)
        children = list(root)
structs = []
for child in children:
if child.tag == 'type' and child.attrib['category'] == 'struct':
structs.append(parse_struct(child))
self.assertEqual(structs[0], Struct('VkApplicationInfo', [
Member('sType', 'VkStructureType'),
Member('pNext', 'const void *'),
Member('pApplicationName', 'const char *'),
Member('applicationVersion', 'uint32_t'),
Member('pEngineName', 'const char *'),
Member('engineVersion', 'uint32_t'),
Member('apiVersion', 'uint32_t'),
]))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c1ef877926251feddc71d5cace353642",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 173,
"avg_line_length": 51.21546961325967,
"alnum_prop": 0.5294498381877023,
"repo_name": "tsukushibito/tempura",
"id": "9db56744632ebafd72fc3b69e4aa0f08688a0127",
"size": "9270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generator/tests/test_vkxml2rs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, basedir)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
# -- Options for Napoleon Extension --------------------------------------------
# Parse Google style docstrings.
# See http://google.github.io/styleguide/pyguide.html
napoleon_google_docstring = True
# Parse NumPy style docstrings.
# See https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
napoleon_numpy_docstring = True
# Should special members (like __membername__) and private members
# (like _membername) be included in the documentation if they
# have docstrings?
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
# If True, docstring sections will use the ".. admonition::" directive.
# If False, docstring sections will use the ".. rubric::" directive.
# One may look better than the other depending on what HTML theme is used.
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
# If True, use Sphinx :ivar: directive for instance variables:
# :ivar attr1: Description of attr1.
# :type attr1: type
# If False, use Sphinx .. attribute:: directive for instance variables:
# .. attribute:: attr1
#
# *type*
#
# Description of attr1.
napoleon_use_ivar = False
# If True, use Sphinx :param: directive for function parameters:
# :param arg1: Description of arg1.
# :type arg1: type
# If False, output function parameters using the :parameters: field:
# :parameters: **arg1** (*type*) -- Description of arg1.
napoleon_use_param = True
# If True, use Sphinx :rtype: directive for the return type:
# :returns: Description of return value.
# :rtype: type
# If False, output the return type inline with the return description:
# :returns: *type* -- Description of return value.
napoleon_use_rtype = True
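# For illustration, a Google style docstring that the settings above are
# tuned to parse (the function itself is hypothetical, not part of napoleon):
#
#     def add(a, b):
#         """Add two numbers.
#
#         Args:
#             a (int): First operand.
#             b (int): Second operand.
#
#         Returns:
#             int: The sum. Rendered via :rtype: because
#                 napoleon_use_rtype is True.
#         """
#         return a + b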
# -- Autodoc configuration -----------------------------------------------------
autoclass_content = 'class'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon']
intersphinx_mapping = {
'pockets': ('https://pockets.readthedocs.io/en/latest/', None),
'python': ('https://docs.python.org/3.4', None),
'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'napoleon'
copyright = u'2013-2015, Rob Ruana'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.2.10'
# The full version, including alpha/beta/rc tags.
#release = '0.2.10'
version_path = os.path.join(basedir,
'sphinxcontrib',
'napoleon',
'_version.py')
exec(open(version_path).read())
release = version = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# html_theme_options = {
# 'nosidebar' : False,
# 'sidebarwidth' : 230,
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'napoleondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'napoleon.tex', u'Napoleon Documentation',
u'Rob Ruana', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'napoleon', u'Napoleon Documentation',
[u'Rob Ruana'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'napoleon', u'Napoleon Documentation',
u'Rob Ruana', 'napoleon', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
{
"content_hash": "b143c226070b2bbeeb1ab6c856a79f17",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 80,
"avg_line_length": 32.77742946708464,
"alnum_prop": 0.6836266258607498,
"repo_name": "Lemma1/MAC-POSTS",
"id": "f0e28a7c0b83567f9f9fc881a634fe9a114a4662",
"size": "10814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc_builder/sphinx-contrib/napoleon/docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "3394"
},
{
"name": "Batchfile",
"bytes": "103388"
},
{
"name": "C",
"bytes": "5399"
},
{
"name": "C++",
"bytes": "3595985"
},
{
"name": "CMake",
"bytes": "53433"
},
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "HTML",
"bytes": "18640"
},
{
"name": "JavaScript",
"bytes": "44610"
},
{
"name": "Jupyter Notebook",
"bytes": "7469541"
},
{
"name": "MATLAB",
"bytes": "5439"
},
{
"name": "Makefile",
"bytes": "148059"
},
{
"name": "Python",
"bytes": "1950140"
},
{
"name": "Shell",
"bytes": "2554"
}
],
"symlink_target": ""
}
|
import hashlib
import os
import random
import re
import string
import time
from base64 import decodestring
from contextlib import contextmanager
from datetime import datetime
from django import dispatch, forms
from django.conf import settings
from django.contrib.auth.hashers import BasePasswordHasher, mask_hash
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core import validators
from django.db import models, transaction
from django.template import Context, loader
from django.utils import translation
from django.utils.crypto import constant_time_compare
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str, smart_unicode
from django.utils.functional import lazy
import caching.base as caching
import commonware.log
import tower
from tower import ugettext as _
import amo
import amo.models
from access.models import Group, GroupUser
from amo.urlresolvers import reverse
from translations.fields import NoLinksField, save_signal
from translations.models import Translation
from translations.query import order_by_translation
log = commonware.log.getLogger('z.users')
class SHA512PasswordHasher(BasePasswordHasher):
"""
The SHA2 password hashing algorithm, 512 bits.
"""
algorithm = 'sha512'
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.new(self.algorithm,
smart_str(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def get_hexdigest(algorithm, salt, raw_password):
if 'base64' in algorithm:
# These are getpersonas passwords with base64 encoded salts.
salt = decodestring(salt)
algorithm = algorithm.replace('+base64', '')
if algorithm.startswith('sha512+MD5'):
# These are persona specific passwords when we imported
# users from getpersonas.com. The password is md5 hashed
# and then sha512'd.
md5 = hashlib.new('md5', raw_password).hexdigest()
return hashlib.new('sha512', smart_str(salt + md5)).hexdigest()
return hashlib.new(algorithm, smart_str(salt + raw_password)).hexdigest()
def rand_string(length):
return ''.join(random.choice(string.letters) for i in xrange(length))
def create_password(algorithm, raw_password):
salt = get_hexdigest(algorithm, rand_string(12), rand_string(12))[:64]
hsh = get_hexdigest(algorithm, salt, raw_password)
return '$'.join([algorithm, salt, hsh])
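# Illustrative round trip for the helpers above (the salt is random, so only
# the structure of the encoded value is shown):
#
#     >>> encoded = create_password('sha512', 'hunter2')
#     >>> algo, salt, hsh = encoded.split('$')
#     >>> algo, len(salt), len(hsh)
#     ('sha512', 64, 128)
#     >>> hsh == get_hexdigest(algo, salt, 'hunter2')
#     True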
class UserForeignKey(models.ForeignKey):
"""
A replacement for models.ForeignKey('users.UserProfile').
This field uses UserEmailField to make form fields key off the user's email
instead of the primary key id. We also hook up autocomplete automatically.
"""
def __init__(self, *args, **kw):
super(UserForeignKey, self).__init__(UserProfile, *args, **kw)
def value_from_object(self, obj):
return getattr(obj, self.name).email
def formfield(self, **kw):
defaults = {'form_class': UserEmailField}
defaults.update(kw)
return models.Field.formfield(self, **defaults)
class UserEmailField(forms.EmailField):
def clean(self, value):
if value in validators.EMPTY_VALUES:
raise forms.ValidationError(self.error_messages['required'])
try:
return UserProfile.objects.get(email=value)
except UserProfile.DoesNotExist:
raise forms.ValidationError(_('No user with that email.'))
def widget_attrs(self, widget):
lazy_reverse = lazy(reverse, str)
return {'class': 'email-autocomplete',
'data-src': lazy_reverse('users.ajax')}
class UserManager(BaseUserManager, amo.models.ManagerBase):
def create_user(self, username, email, password=None, fxa_id=None):
# We'll send username=None when registering through FxA to try and
# generate a username from the email.
if username is None:
username = self._generate_username(email)
user = self.model(username=username, email=email, fxa_id=fxa_id)
# FxA won't set a password so don't let a user log in with one.
if password is None:
user.set_unusable_password()
else:
user.set_password(password)
log.debug('Creating user with email {} and username {}'.format(
email, username))
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
"""
Creates and saves a superuser.
"""
user = self.create_user(username, email, password)
admins = Group.objects.get(name='Admins')
GroupUser.objects.create(user=user, group=admins)
return user
def _generate_username(self, seed):
"""Generate a username from a seed which is intended to be an email
address. If the username is taken a single attempt will be made to
append a random number to it and get a unique username.
"""
log.info('Generating username for {}'.format(seed))
if self.model.objects.filter(username=seed).exists():
# Only make one attempt at generating a new username. This isn't
# meant to be exhaustive but to make it difficult to maliciously
# prevent someone from signing up. See #967 for more discussion.
username = '{seed}-{num}'.format(
seed=seed, num=random.randint(1000, 9999))
log.warning('Username taken for {} trying {}'.format(
seed, username))
return username
else:
log.info('Using seeded username for {}'.format(seed))
return seed
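        # e.g. if 'jane@example.com' is already taken, this may return
        # 'jane@example.com-4821' (a random four-digit suffix).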
AbstractBaseUser._meta.get_field('password').max_length = 255
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase,
AbstractBaseUser):
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
username = models.CharField(max_length=255, default='', unique=True)
display_name = models.CharField(max_length=255, default='', null=True,
blank=True)
email = models.EmailField(unique=True, null=True)
averagerating = models.CharField(max_length=255, blank=True, null=True)
bio = NoLinksField(short=False)
confirmationcode = models.CharField(max_length=255, default='',
blank=True)
deleted = models.BooleanField(default=False)
display_collections = models.BooleanField(default=False)
display_collections_fav = models.BooleanField(default=False)
homepage = models.URLField(max_length=255, blank=True, default='')
location = models.CharField(max_length=255, blank=True, default='')
notes = models.TextField(blank=True, null=True)
notifycompat = models.BooleanField(default=True)
notifyevents = models.BooleanField(default=True)
occupation = models.CharField(max_length=255, default='', blank=True)
# This is essentially a "has_picture" flag right now
picture_type = models.CharField(max_length=75, default='', blank=True)
read_dev_agreement = models.DateTimeField(null=True, blank=True)
last_login_ip = models.CharField(default='', max_length=45, editable=False)
last_login_attempt = models.DateTimeField(null=True, editable=False)
last_login_attempt_ip = models.CharField(default='', max_length=45,
editable=False)
failed_login_attempts = models.PositiveIntegerField(default=0,
editable=False)
is_verified = models.BooleanField(default=True)
region = models.CharField(max_length=11, null=True, blank=True,
editable=False)
lang = models.CharField(max_length=5, null=True, blank=True,
default=settings.LANGUAGE_CODE)
t_shirt_requested = models.DateTimeField(blank=True, null=True,
default=None, editable=False)
fxa_id = models.CharField(blank=True, null=True, max_length=128)
class Meta:
db_table = 'users'
def __init__(self, *args, **kw):
super(UserProfile, self).__init__(*args, **kw)
if self.username:
self.username = smart_unicode(self.username)
def __unicode__(self):
return u'%s: %s' % (self.id, self.display_name or self.username)
@property
def is_superuser(self):
return self.groups.filter(rules='*:*').exists()
@property
def is_staff(self):
from access import acl
return acl.action_allowed_user(self, 'Admin', '%')
def has_perm(self, perm, obj=None):
return self.is_superuser
def has_module_perms(self, app_label):
return self.is_superuser
backend = 'users.backends.AmoUserBackend'
def is_anonymous(self):
return False
def get_user_url(self, name='profile', src=None, args=None):
"""
We use <username> as the slug, unless it contains gross
characters - in which case use <id> as the slug.
"""
from amo.utils import urlparams
chars = '/<>"\''
slug = self.username
if not self.username or any(x in chars for x in self.username):
slug = self.id
args = args or []
url = reverse('users.%s' % name, args=[slug] + args)
return urlparams(url, src=src)
def get_url_path(self, src=None):
return self.get_user_url('profile', src=src)
def flush_urls(self):
urls = ['*/user/%d/' % self.id,
self.picture_url,
]
return urls
@amo.cached_property
def addons_listed(self):
"""Public add-ons this user is listed as author of."""
return self.addons.reviewed().filter(
addonuser__user=self, addonuser__listed=True)
@property
def num_addons_listed(self):
"""Number of public add-ons this user is listed as author of."""
return self.addons.reviewed().filter(
addonuser__user=self, addonuser__listed=True).count()
def my_addons(self, n=8, with_unlisted=False):
"""Returns n addons"""
addons = self.addons
if with_unlisted:
addons = self.addons.model.with_unlisted.filter(authors=self)
qs = order_by_translation(addons, 'name')
return qs[:n]
@property
def picture_dir(self):
from amo.helpers import user_media_path
split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
return os.path.join(user_media_path('userpics'),
split_id.group(2) or '0',
split_id.group(1) or '0')
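        # e.g. user id 123 maps to <userpics>/0/0 and id 1234567 to
        # <userpics>/1/1234, sharding picture files across directories.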
@property
def picture_path(self):
return os.path.join(self.picture_dir, str(self.id) + '.png')
@property
def picture_url(self):
from amo.helpers import user_media_url
if not self.picture_type:
return settings.STATIC_URL + '/img/zamboni/anon_user.png'
else:
split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = "/".join([
split_id.group(2) or '0',
split_id.group(1) or '0',
"%s.png?modified=%s" % (self.id, modified)
])
return user_media_url('userpics') + path
@amo.cached_property
def is_developer(self):
return self.addonuser_set.exists()
@amo.cached_property
def is_addon_developer(self):
return self.addonuser_set.exclude(
addon__type=amo.ADDON_PERSONA).exists()
@amo.cached_property
def is_artist(self):
"""Is this user a Personas Artist?"""
return self.addonuser_set.filter(
addon__type=amo.ADDON_PERSONA).exists()
@amo.cached_property
def needs_tougher_password(user):
from access import acl
return (acl.action_allowed_user(user, 'Admin', '%') or
acl.action_allowed_user(user, 'Addons', 'Edit') or
acl.action_allowed_user(user, 'Addons', 'Review') or
acl.action_allowed_user(user, 'Apps', 'Review') or
acl.action_allowed_user(user, 'Personas', 'Review') or
acl.action_allowed_user(user, 'Users', 'Edit'))
@property
def source(self):
if self.fxa_id:
return 'fxa'
else:
return 'amo'
@property
def name(self):
return smart_unicode(self.display_name or self.username)
welcome_name = name
@amo.cached_property
def reviews(self):
"""All reviews that are not dev replies."""
qs = self._reviews_all.filter(reply_to=None)
# Force the query to occur immediately. Several
# reviews-related tests hang if this isn't done.
return qs
def anonymize(self):
log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
self.email = None
self.password = "sha512$Anonymous$Password"
self.fxa_id = None
self.username = "Anonymous-%s" % self.id # Can't be null
self.display_name = None
self.homepage = ""
self.deleted = True
self.picture_type = ""
self.save()
@transaction.commit_on_success
def restrict(self):
from amo.utils import send_mail
log.info(u'User (%s: <%s>) is being restricted and '
'its user-generated content removed.' % (self, self.email))
g = Group.objects.get(rules='Restricted:UGC')
GroupUser.objects.create(user=self, group=g)
self.reviews.all().delete()
self.collections.all().delete()
t = loader.get_template('users/email/restricted.ltxt')
send_mail(_('Your account has been restricted'),
t.render(Context({})), None, [self.email],
use_blacklist=False, real_email=True)
def unrestrict(self):
log.info(u'User (%s: <%s>) is being unrestricted.' % (self,
self.email))
GroupUser.objects.filter(user=self,
group__rules='Restricted:UGC').delete()
def generate_confirmationcode(self):
if not self.confirmationcode:
self.confirmationcode = ''.join(random.sample(string.letters +
string.digits, 60))
return self.confirmationcode
def set_unusable_password(self):
self.password = ''
def has_usable_password(self):
"""Override AbstractBaseUser.has_usable_password."""
# We also override the check_password method, and don't rely on
# settings.PASSWORD_HASHERS, and don't use "set_unusable_password", so
# we want to bypass most of AbstractBaseUser.has_usable_password
# checks.
return bool(self.password) # Not None and not empty.
def check_password(self, raw_password):
if not self.has_usable_password():
return False
if '$' not in self.password:
valid = (get_hexdigest('md5', '', raw_password) == self.password)
if valid:
# Upgrade an old password.
self.set_password(raw_password)
self.save()
return valid
algo, salt, hsh = self.password.split('$')
# Complication due to getpersonas account migration; we don't
# know if passwords were utf-8 or latin-1 when hashed. If you
# can prove that they are one or the other, you can delete one
# of these branches.
if '+base64' in algo and isinstance(raw_password, unicode):
if hsh == get_hexdigest(algo, salt, raw_password.encode('utf-8')):
return True
else:
try:
return hsh == get_hexdigest(algo, salt,
raw_password.encode('latin1'))
except UnicodeEncodeError:
return False
else:
return hsh == get_hexdigest(algo, salt, raw_password)
def set_password(self, raw_password, algorithm='sha512'):
self.password = create_password(algorithm, raw_password)
# Can't do CEF logging here because we don't have a request object.
def email_confirmation_code(self):
from amo.utils import send_mail
log.debug("Sending account confirmation code for user (%s)", self)
url = "%s%s" % (settings.SITE_URL,
reverse('users.confirm',
args=[self.id, self.confirmationcode]))
domain = settings.DOMAIN
t = loader.get_template('users/email/confirm.ltxt')
c = {'domain': domain, 'url': url, }
send_mail(_("Please confirm your email address"),
t.render(Context(c)), None, [self.email],
use_blacklist=False, real_email=True)
def log_login_attempt(self, successful):
"""Log a user's login attempt"""
self.last_login_attempt = datetime.now()
self.last_login_attempt_ip = commonware.log.get_remote_addr()
if successful:
log.debug(u"User (%s) logged in successfully" % self)
self.failed_login_attempts = 0
self.last_login_ip = commonware.log.get_remote_addr()
else:
log.debug(u"User (%s) failed to log in" % self)
if self.failed_login_attempts < 16777216:
self.failed_login_attempts += 1
self.save(update_fields=['last_login_ip', 'last_login_attempt',
'last_login_attempt_ip',
'failed_login_attempts'])
def mobile_collection(self):
return self.special_collection(
amo.COLLECTION_MOBILE,
defaults={'slug': 'mobile', 'listed': False,
'name': _('My Mobile Add-ons')})
def favorites_collection(self):
return self.special_collection(
amo.COLLECTION_FAVORITES,
defaults={'slug': 'favorites', 'listed': False,
'name': _('My Favorite Add-ons')})
def special_collection(self, type_, defaults):
from bandwagon.models import Collection
c, new = Collection.objects.get_or_create(
author=self, type=type_, defaults=defaults)
if new:
# Do an extra query to make sure this gets transformed.
c = Collection.objects.using('default').get(id=c.id)
return c
@contextmanager
def activate_lang(self):
"""
        Activate the language for the user. If none is set, fall back to the
        site default, which is en-US.
"""
lang = self.lang if self.lang else settings.LANGUAGE_CODE
old = translation.get_language()
tower.activate(lang)
yield
tower.activate(old)
def remove_locale(self, locale):
"""Remove the given locale for the user."""
Translation.objects.remove_for(self, locale)
@classmethod
def get_fallback(cls):
return cls._meta.get_field('lang')
def addons_for_collection_type(self, type_):
"""Return the addons for the given special collection type."""
from bandwagon.models import CollectionAddon
qs = CollectionAddon.objects.filter(
collection__author=self, collection__type=type_)
return qs.values_list('addon', flat=True)
@amo.cached_property
def mobile_addons(self):
return self.addons_for_collection_type(amo.COLLECTION_MOBILE)
@amo.cached_property
def favorite_addons(self):
return self.addons_for_collection_type(amo.COLLECTION_FAVORITES)
@amo.cached_property
def watching(self):
return self.collectionwatcher_set.values_list('collection', flat=True)
models.signals.pre_save.connect(save_signal, sender=UserProfile,
dispatch_uid='userprofile_translations')
@dispatch.receiver(models.signals.post_save, sender=UserProfile,
dispatch_uid='user.post_save')
def user_post_save(sender, instance, **kw):
if not kw.get('raw'):
from . import tasks
tasks.index_users.delay([instance.id])
@dispatch.receiver(models.signals.post_delete, sender=UserProfile,
dispatch_uid='user.post_delete')
def user_post_delete(sender, instance, **kw):
if not kw.get('raw'):
from . import tasks
tasks.unindex_users.delay([instance.id])
class UserNotification(amo.models.ModelBase):
user = models.ForeignKey(UserProfile, related_name='notifications')
notification_id = models.IntegerField()
enabled = models.BooleanField(default=False)
class Meta:
db_table = 'users_notifications'
@staticmethod
    def update_or_create(update=None, **kwargs):
        # Copy so the default/shared dict is never mutated between calls.
        update = dict(update or {})
        rows = UserNotification.objects.filter(**kwargs).update(**update)
        if not rows:
            update.update(dict(**kwargs))
UserNotification.objects.create(**update)
class BlacklistedName(amo.models.ModelBase):
"""Blacklisted User usernames and display_names + Collections' names."""
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'users_blacklistedname'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, name):
"""
Check to see if a given name is in the (cached) blacklist.
Return True if the name contains one of the blacklisted terms.
"""
name = name.lower()
qs = cls.objects.all()
def f():
return [n.lower() for n in qs.values_list('name', flat=True)]
blacklist = caching.cached_with(qs, f, 'blocked')
return any(n in name for n in blacklist)
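        # e.g. with 'admin' in the blacklist, blocked('MyAdminAccount')
        # returns True because 'admin' occurs inside 'myadminaccount'.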
class BlacklistedEmailDomain(amo.models.ModelBase):
"""Blacklisted user e-mail domains."""
domain = models.CharField(max_length=255, unique=True, default='',
blank=False)
def __unicode__(self):
return self.domain
@classmethod
def blocked(cls, domain):
qs = cls.objects.all()
def f():
return list(qs.values_list('domain', flat=True))
blacklist = caching.cached_with(qs, f, 'blocked')
        # Because there isn't a good way to know whether the domain is
        # "example.com" or "example.co.jp", we re-construct it suffix by
        # suffix: for "bad.example.co.jp" the loop below checks
        # ['bad.example.co.jp', 'example.co.jp', 'co.jp'].
x = domain.lower().split('.')
for d in ['.'.join(x[y:]) for y in range(len(x) - 1)]:
if d in blacklist:
return True
class BlacklistedPassword(amo.models.ModelBase):
"""Blacklisted passwords"""
password = models.CharField(max_length=255, unique=True, blank=False)
def __unicode__(self):
return self.password
@classmethod
def blocked(cls, password):
return cls.objects.filter(password=password)
class UserHistory(amo.models.ModelBase):
email = models.EmailField()
user = models.ForeignKey(UserProfile, related_name='history')
class Meta:
db_table = 'users_history'
ordering = ('-created',)
@UserProfile.on_change
def watch_email(old_attr={}, new_attr={}, instance=None,
sender=None, **kw):
new_email, old_email = new_attr.get('email'), old_attr.get('email')
if old_email and new_email != old_email:
log.debug('Creating user history for user: %s' % instance.pk)
UserHistory.objects.create(email=old_email, user_id=instance.pk)
|
{
"content_hash": "f192bfc4d12a33e33be412e3cdb97793",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 79,
"avg_line_length": 36.63813813813814,
"alnum_prop": 0.6120650793000287,
"repo_name": "mdaif/olympia",
"id": "cb6a043806b65c97f18f3b852e2cb718aaa4a99b",
"size": "24401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/users/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "657480"
},
{
"name": "HTML",
"bytes": "1635828"
},
{
"name": "JavaScript",
"bytes": "1371668"
},
{
"name": "Makefile",
"bytes": "3926"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4017646"
},
{
"name": "Shell",
"bytes": "10337"
},
{
"name": "Smarty",
"bytes": "2179"
}
],
"symlink_target": ""
}
|
from a5288.reports import MissedCallbackReport
CUSTOM_REPORTS = (
('Custom Reports', (
MissedCallbackReport,
)),
)
|
{
"content_hash": "944e6ae1502abfb57bbd2a9fc2fb2803",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 18.857142857142858,
"alnum_prop": 0.6666666666666666,
"repo_name": "puttarajubr/commcare-hq",
"id": "d7b9bf54eda985ea87c9dee9de9e92d36b487185",
"size": "132",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "custom/_legacy/a5288/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
"""
Django settings for kuiqblog project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('kuiqblog')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'grappelli',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'ckeditor',
'easy_thumbnails',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'kuiqblog.users', # custom users app
# Your stuff: custom apps go here
'weblog',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'kuiqblog.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Hodonou SOUNTON""", 'sounton@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///kuiqblog"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
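# Example DATABASE_URL values that env.db() understands (illustrative only):
#   postgres://user:password@localhost:5432/kuiqblog
#   sqlite:////full/path/to/kuiqblog.db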
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Africa/Porto-Novo'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'weblog.context_processors.weblog',
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('kuiqblog.taskapp.celery.CeleryConfig',)
# If you use a different broker (e.g. rabbitmq, redis, memcached) instead of the django database broker, you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env("CELERY_BROKER_URL", default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
|
{
"content_hash": "26f29dbbc03390f003af56c779361c85",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 114,
"avg_line_length": 36.15833333333333,
"alnum_prop": 0.6106245678727817,
"repo_name": "drxos/kuiqblog",
"id": "7ae54189587686ae6fc925340822c012e4f94d6e",
"size": "8702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "754359"
},
{
"name": "HTML",
"bytes": "3196566"
},
{
"name": "JavaScript",
"bytes": "3034785"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "74019"
},
{
"name": "Shell",
"bytes": "4951"
}
],
"symlink_target": ""
}
|
"""
Tests for `logsucker` module.
"""
import pytest
from logsucker import logsucker
class TestLogsucker(object):
@classmethod
def setup_class(cls):
pass
def test_something(self):
pass
@classmethod
def teardown_class(cls):
pass
|
{
"content_hash": "846e01ed0ae214211eb0ac984362cbb6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 31,
"avg_line_length": 14.526315789473685,
"alnum_prop": 0.6376811594202898,
"repo_name": "qba73/logsucker",
"id": "07584dfeb02c7d8d579fd97e4660b5357fc0d592",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_logsucker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "1993"
}
],
"symlink_target": ""
}
|
import numpy
from chainer import backend
from chainer import initializer
from chainer import utils
_orthogonal_constraints = { # (assert emb., assert proj.)
'auto': (False, False),
'projection': (False, True),
'embedding': (True, False),
'basis': (True, True),
}
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
class Orthogonal(initializer.Initializer):
"""Initializes array with an orthogonal system.
This initializer first makes a matrix of the same shape as the
array to be initialized whose elements are drawn independently from
standard Gaussian distribution.
Next, it applies QR decomposition to (the transpose of) the matrix.
To make the decomposition (almost surely) unique, we require the diagonal
of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,
https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).
Then, it initializes the array with the (semi-)orthogonal matrix Q.
Finally, the array is multiplied by the constant ``scale``.
If the ``ndim`` of the input array is more than 2, we consider the array
to be a matrix by concatenating all axes except the first one.
The number of vectors consisting of the orthogonal system
(i.e. first element of the shape of the array) must be equal to or smaller
than the dimension of each vector (i.e. second element of the shape of
the array).
Attributes:
scale (float): A constant to be multiplied by.
dtype: Data type specifier.
mode (str): Assertion on the initialized shape.
``'auto'`` (default), ``'projection'`` (before v7),
``'embedding'``, or ``'basis'``.
Reference: Saxe et al., https://arxiv.org/abs/1312.6120
"""
def __init__(self, scale=1.1, dtype=None, mode='auto'):
self.scale = scale
self.mode = mode
try:
self._checks = _orthogonal_constraints[mode]
except KeyError:
raise ValueError(
'Invalid mode: {}. Choose from {}.'.format(
repr(mode),
', '.join(repr(m) for m in _orthogonal_constraints)))
super(Orthogonal, self).__init__(dtype)
# TODO(Kenta Oono)
# How do we treat overcomplete base-system case?
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype
xp = backend.get_array_module(array)
if not array.shape: # 0-dim case
array[...] = self.scale * (2 * numpy.random.randint(2) - 1)
elif not array.size:
raise ValueError('Array to be initialized must be non-empty.')
else:
# numpy.prod returns float value when the argument is empty.
out_dim = len(array)
in_dim = utils.size_of_shape(array.shape[1:])
if (in_dim > out_dim and self._checks[0]) or (
in_dim < out_dim and self._checks[1]):
raise ValueError(
'Cannot make orthogonal {}.'
'shape = {}, interpreted as '
'{}-dim input and {}-dim output.'.format(
self.mode, array.shape, in_dim, out_dim))
transpose = in_dim > out_dim
a = numpy.random.normal(size=(out_dim, in_dim))
if transpose:
a = a.T
# cupy.linalg.qr requires cusolver in CUDA 8+
q, r = numpy.linalg.qr(a)
q *= numpy.copysign(self.scale, numpy.diag(r))
if transpose:
q = q.T
array[...] = xp.asarray(q.reshape(array.shape))
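# A minimal usage sketch (shape and tolerance chosen only for illustration):
#
#     >>> import numpy as np
#     >>> w = np.empty((3, 5), dtype=np.float32)
#     >>> Orthogonal(scale=1.0)(w)          # fills ``w`` in place
#     >>> np.allclose(w.dot(w.T), np.eye(3), atol=1e-4)
#     True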
|
{
"content_hash": "0683e1c88073fd965dd8073d13a3f894",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 39.774193548387096,
"alnum_prop": 0.5988104893214382,
"repo_name": "tkerola/chainer",
"id": "e15dade78331c0d33167558aeea403fe8ee6948f",
"size": "3699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/initializers/orthogonal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
}
|
"""Provides functionality to notify people."""
from __future__ import annotations
import asyncio
import voluptuous as vol
import homeassistant.components.persistent_notification as pn
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import ( # noqa: F401
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
DOMAIN,
NOTIFY_SERVICE_SCHEMA,
PERSISTENT_NOTIFICATION_SERVICE_SCHEMA,
SERVICE_NOTIFY,
SERVICE_PERSISTENT_NOTIFICATION,
)
from .legacy import ( # noqa: F401
BaseNotificationService,
async_reload,
async_reset_platform,
async_setup_legacy,
check_templates_warn,
)
# Platform specific data
ATTR_TITLE_DEFAULT = "Home Assistant"
PLATFORM_SCHEMA = vol.Schema(
{vol.Required(CONF_PLATFORM): cv.string, vol.Optional(CONF_NAME): cv.string},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the notify services."""
platform_setups = async_setup_legacy(hass, config)
    # We need to add the component here to break the deadlock
# when setting up integrations from config entries as
# they would otherwise wait for notify to be
# setup and thus the config entries would not be able to
# setup their platforms, but we need to do it after
# the dispatcher is connected so we don't miss integrations
# that are registered before the dispatcher is connected
hass.config.components.add(DOMAIN)
if platform_setups:
await asyncio.wait([asyncio.create_task(setup) for setup in platform_setups])
async def persistent_notification(service: ServiceCall) -> None:
"""Send notification via the built-in persistent_notify integration."""
message = service.data[ATTR_MESSAGE]
message.hass = hass
check_templates_warn(hass, message)
title = None
if title_tpl := service.data.get(ATTR_TITLE):
check_templates_warn(hass, title_tpl)
title_tpl.hass = hass
title = title_tpl.async_render(parse_result=False)
pn.async_create(hass, message.async_render(parse_result=False), title)
hass.services.async_register(
DOMAIN,
SERVICE_PERSISTENT_NOTIFICATION,
persistent_notification,
schema=PERSISTENT_NOTIFICATION_SERVICE_SCHEMA,
)
return True
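# An example call handled by the service registered above; ``title`` and
# ``message`` are templates and are rendered before being passed to
# persistent_notification.async_create (YAML shown for illustration):
#
#   service: notify.persistent_notification
#   data:
#     title: "Nightly backup"
#     message: "Backup finished at {{ now() }}"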
|
{
"content_hash": "983c41574b4ae5911c4e42a7ea214aed",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 85,
"avg_line_length": 31.475,
"alnum_prop": 0.7088959491660047,
"repo_name": "mezz64/home-assistant",
"id": "52864dd001d65e4cac57bc3d7818ae24b069adb0",
"size": "2518",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "red", "green", "yellow", "blue",
"magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
"brightmagenta", "brightcyan", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%im" % (60 + x)
x += 1
del d, l, x
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
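# Example (bold red text followed by a reset):
#
#     >>> ansiformat('*red*', 'error')
#     '\x1b[01m\x1b[31merror\x1b[39;49;00m'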
|
{
"content_hash": "3056865967fd5c403747ede284abcec8",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 24.242857142857144,
"alnum_prop": 0.5427224513847967,
"repo_name": "tmm1/pygments.rb",
"id": "8dd08abebce13dd43c7e5340e0f91d8c6cf400b4",
"size": "1697",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "vendor/pygments-main/pygments/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "99523"
},
{
"name": "Python",
"bytes": "26555"
},
{
"name": "Ruby",
"bytes": "32068"
}
],
"symlink_target": ""
}
|
from os.path import abspath, dirname, basename, join
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_PATH = abspath(dirname(__file__))
PROJECT_NAME = basename(ROOT_PATH)
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/admin-media/'
MEDIA_URL = ''
SECRET_KEY = 't2eo^kd%k+-##ml3@_x__$j0(ps4p0q6eg*c4ttp9d2n(t!iol'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
    join(ROOT_PATH, 'templates'),  # trailing comma keeps this a tuple
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'social_auth',
'app',
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'social_auth.backends.facebook.FacebookBackend',
'social_auth.backends.google.GoogleOAuthBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'social_auth.backends.google.GoogleBackend',
'social_auth.backends.yahoo.YahooBackend',
'social_auth.backends.contrib.linkedin.LinkedinBackend',
'social_auth.backends.OpenIDBackend',
'social_auth.backends.contrib.livejournal.LiveJournalBackend',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'social_auth.context_processors.social_auth_by_type_backends',
)
#SOCIAL_AUTH_ENABLED_BACKENDS = ('google', 'google-oauth', 'facebook')
LOGIN_REDIRECT_URL = '/'
try:
from local_settings import *
except ImportError:
pass
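# local_settings.py is where per-backend credentials usually live. For
# illustration, the setting names django-social-auth expects for the Twitter
# and Facebook backends (adjust to the backends enabled above):
#
#     TWITTER_CONSUMER_KEY = '...'
#     TWITTER_CONSUMER_SECRET = '...'
#     FACEBOOK_APP_ID = '...'
#     FACEBOOK_API_SECRET = '...'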
|
{
"content_hash": "04e556b91d0a9e53357f10d998402f27",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 70,
"avg_line_length": 26.02127659574468,
"alnum_prop": 0.7072771872444807,
"repo_name": "thesealion/django-social-auth",
"id": "93267dfa4aef73ffcdece7f12f34ba7289eaf364",
"size": "2446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8651"
},
{
"name": "Python",
"bytes": "127716"
}
],
"symlink_target": ""
}
|
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
if sys.version_info < (3,):
exit()
import time
import unittest
from . import diff_match_patch as dmp_module
from importlib import reload
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
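# For example, a hypothetical diff
# [(DIFF_EQUAL, "ab"), (DIFF_DELETE, "c"), (DIFF_INSERT, "d")]
# rebuilds to text1 == "abc" (equalities plus deletions) and
# text2 == "abd" (equalities plus insertions).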
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(chr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(chr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "2260ad7fa25e7d9c830d3f71340cca76",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 408,
"avg_line_length": 47.956371986222734,
"alnum_prop": 0.6454393105099354,
"repo_name": "kkujawinski/sublime-text-3-live-demo",
"id": "bece5ef3c4688ed5727b065b7edfd1736c4af37b",
"size": "41792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diff_match_patch_test_2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3557"
},
{
"name": "Lasso",
"bytes": "1815"
},
{
"name": "Python",
"bytes": "176411"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.11.0':
raise RuntimeError("CMake >= 3.11.0 is required")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='maestro',
version='0.1.0',
author='Mojocorp',
author_email='morganleborgne@gmail.com',
description='Control of Pololu Maestro servo control board',
long_description='',
ext_modules=[CMakeExtension('maestro')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
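# A hedged note on how this CMake-backed build is usually driven; the exact
# commands depend on the local toolchain and are not defined in this file:
#
#   pip install .                         # runs CMakeBuild, then installs maestro
#   python setup.py build_ext --inplace   # builds the extension into the source tree
#
# Either path configures with `cmake <sourcedir> ...` and compiles with
# `cmake --build .` inside the temporary build directory created above.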
|
{
"content_hash": "423e2afd11fecd8e21694b8519336d40",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 98,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.5829545454545455,
"repo_name": "papabricole/Pololu_Maestro",
"id": "28e21a5a71322ae235535bc88cae47be5bd24c0a",
"size": "2640",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28134"
},
{
"name": "C++",
"bytes": "28531"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for processing Lammps output files:
1. log file: contains the thermodynamic data with the format set by the
'thermo_style' command
2. trajectory file(dump file): the file generated by the 'dump' command
Restrictions:
The first 2 fields of the ATOMS section in the trajectory(dump) file
must be the atom id and the atom type. There can be arbitrary number
of fields after that and they all will be treated as floats and
updated based on the field names in the ITEM: ATOMS line.
"""
import re
import os
import glob
from io import open, StringIO
import numpy as np
from monty.json import MSONable
from monty.io import zopen
from pymatgen.core.periodic_table import _pt_data
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer
from pymatgen.io.lammps.data import LammpsBox, LammpsData
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
__credits__ = "Navnidhi Rajput, Michael Humbert"
# TODO write parser for one and multi thermo_styles
class LammpsLog(MSONable):
"""
Parser for LAMMPS log file.
"""
def __init__(self, log_file="log.lammps"):
"""
Args:
log_file (string): path to the log file
"""
self.log_file = os.path.abspath(log_file)
self.timestep = -1
self._parse_log()
def _parse_log(self):
"""
Parse the log file for run and thermodynamic data.
Sets the thermodynamic data as a structured numpy array with field names
taken from the custom thermo_style command. thermo_style one and multi
are not supported yet
"""
thermo_data = []
fixes = []
d_build = None
thermo_pattern = None
with open(self.log_file, 'r') as logfile:
for line in logfile:
# timestep, the unit depends on the 'units' command
time = re.search(r'timestep\s+([0-9]+)', line)
if time and not d_build:
self.timestep = float(time.group(1))
# total number md steps
steps = re.search(r'run\s+([0-9]+)', line)
if steps and not d_build:
self.nmdsteps = int(steps.group(1))
# simulation info
fix = re.search(r'fix.+', line)
if fix and not d_build:
fixes.append(fix.group())
# dangerous builds
danger = re.search(r'Dangerous builds\s+([0-9]+)', line)
if danger and not d_build:
d_build = int(danger.group(1))
# logging interval
thermo = re.search(r'thermo\s+([0-9]+)', line)
if thermo and not d_build:
self.interval = float(thermo.group(1))
# thermodynamic data, set by the thermo_style command
fmt = re.search(r'thermo_style.+', line)
if fmt and not d_build:
thermo_type = fmt.group().split()[1]
fields = fmt.group().split()[2:]
no_parse = ["one", "multi"]
if thermo_type in no_parse:
thermo_data.append("cannot parse thermo_style")
else:
thermo_pattern_string = r"\s*([0-9eE\.+-]+)" + "".join(
[r"\s+([0-9eE\.+-]+)" for _ in range(len(fields) - 1)])
thermo_pattern = re.compile(thermo_pattern_string)
if thermo_pattern:
if thermo_pattern.search(line):
m = thermo_pattern.search(line)
thermo_data.append(tuple([float(x) for x in m.groups()]))
if thermo_data:
if isinstance(thermo_data[0], str):
self.thermo_data = [thermo_data]
else:
# numpy arrays are easier to reshape, previously we used np.array with dtypes
self.thermo_data = {
fields[i]: [thermo_data[j][i] for j in range(len(thermo_data))]
for i in range(len(fields))}
self.fixes = fixes
self.dangerous_builds = d_build
def as_dict(self):
d = {}
for attrib in [a for a in dir(self)
if not a.startswith('__') and not callable(getattr(self, a))]:
d[attrib] = getattr(self, attrib)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
# not really needed ?
@classmethod
def from_dict(cls, d):
return cls(log_file=d["log_file"])
class LammpsDump(MSONable):
"""
Dump file parser.
.. attribute:: steps
All steps in the dump as a list of
{"timestep": current timestep,
"natoms": no. of atoms,
"box": simulation box (optional),
"atoms_data": dumped data for atoms as 2D np.array}
.. attribute:: timesteps
List of timesteps in sequence.
"""
def __init__(self, filename, parse_box=True, dtype=float):
"""
Args:
filename (str): Filename to parse. The timestep wildcard
('*') is supported and the files are parsed in the
sequence of timestep.
parse_box (bool): Whether parse box info for each step.
Default to True.
dtype: np.dtype for atoms data array.
"""
self.filename = filename
self.parse_box = parse_box
self.dtype = dtype
fnames = glob.glob(self.filename)
if len(fnames) > 1:
pattern = r"%s" % filename.replace("*", "([0-9]+)")
pattern = pattern.replace("\\", "\\\\")
fnames = sorted(fnames,
key=lambda f: int(re.match(pattern, f).group(1)))
steps = []
for fname in fnames:
with zopen(fname, "rt") as f:
run = f.read()
dumps = run.split("ITEM: TIMESTEP")[1:]
steps.extend([self._parse_timestep(d) for d in dumps])
self.steps = steps
self.timesteps = [s["timestep"] for s in self.steps]
def __len__(self):
return len(self.timesteps)
def __getitem__(self, ind):
return self.steps[ind]
def _parse_timestep(self, dump):
step = {}
lines = dump.split("\n")
step["timestep"] = int(lines[1])
step["natoms"] = int(lines[3])
step["atoms_data"] = np.loadtxt(StringIO("\n".join(lines[9:])),
dtype=self.dtype)
if self.parse_box:
box_arr = np.loadtxt(StringIO("\n".join(lines[5:8])))
bounds = box_arr[:, :2]
tilt = None
if "xy xz yz" in lines[4]:
tilt = box_arr[:, 2]
x = (0, tilt[0], tilt[1], tilt[0] + tilt[1])
y = (0, tilt[2])
bounds -= np.array([[min(x), max(x)], [min(y), max(y)],
[0, 0]])
step["box"] = LammpsBox(bounds, tilt)
return step
def as_dict(self):
d = {"filename": self.filename}
json_steps = []
for step in self.steps:
json_step = {"timestep": step["timestep"],
"natoms": step["natoms"]}
json_step["atoms_data"] = step["atoms_data"].tolist()
if self.parse_box:
json_step["box"] = step["box"].as_dict()
json_steps.append(json_step)
d["steps"] = json_steps
return d
# TODO: @wood-b simplify this, use LammpsDump to parse + use mdanalysis to process.
# make sure its backward compatible
class LammpsRun(MSONable):
"""
Parse the lammps data file, trajectory(dump) file and the log file to extract
useful info about the system.
Note: In order to parse trajectory or dump file, the first 2 fields must be
the id and the atom type. There can be arbitrary number of fields after
that and they all will be treated as floats.
Args:
data_file (str): path to the data file
trajectory_file (str): path to the trajectory file or dump file
log_file (str): path to the log file
"""
def __init__(self, data_file, trajectory_file, log_file="log.lammps"):
self.data_file = os.path.abspath(data_file)
self.trajectory_file = os.path.abspath(trajectory_file)
self.log_file = os.path.abspath(log_file)
self.log = LammpsLog(log_file)
self.lammps_data = LammpsData.from_file(self.data_file)
self._set_mol_masses_and_charges()
self._parse_trajectory()
def _parse_trajectory(self):
"""
parse the trajectory file.
"""
traj_timesteps = []
trajectory = []
timestep_label = "ITEM: TIMESTEP"
# "ITEM: ATOMS id type ...
traj_label_pattern = re.compile(
r"^\s*ITEM:\s+ATOMS\s+id\s+type\s+([A-Za-z0-9[\]_\s]*)")
# default: id type x y z vx vy vz mol"
# updated below based on the field names in the ITEM: ATOMS line
# Note: the first 2 fields must be the id and the atom type. There can
# be arbitrary number of fields after that and they all will be treated
# as floats.
traj_pattern = re.compile(
r"\s*(\d+)\s+(\d+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+"
r"([0-9eE.+-]+)\s+"
r"([0-9eE.+-]+)\s+"
r"([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+(\d+)\s*")
parse_timestep = False
with open(self.trajectory_file) as tf:
for line in tf:
if timestep_label in line:
parse_timestep = True
continue
if parse_timestep:
traj_timesteps.append(float(line))
parse_timestep = False
if traj_label_pattern.search(line):
fields = traj_label_pattern.search(line).group(1)
fields = fields.split()
# example:- id type x y z vx vy vz mol ...
traj_pattern_string = r"\s*(\d+)\s+(\d+)" + "".join(
[r"\s+([0-9eE\.+-]+)" for _ in range(len(fields))])
traj_pattern = re.compile(traj_pattern_string)
if traj_pattern.search(line):
# first 2 fields must be id and type, the rest of them
# will be casted as floats
m = traj_pattern.search(line)
line_data = []
line_data.append(int(m.group(1)) - 1)
line_data.append(int(m.group(2)))
line_data.extend(
[float(x) for i, x in enumerate(m.groups()) if
i + 1 > 2])
trajectory.append(tuple(line_data))
traj_dtype = np.dtype([(str('Atoms_id'), np.int64),
(str('atom_type'), np.int64)] +
[(str(fld), np.float64) for fld in fields])
self.trajectory = np.array(trajectory, dtype=traj_dtype)
self.timesteps = np.array(traj_timesteps, dtype=np.float64)
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
self.trajectory[begin:end] = np.sort(self.trajectory[begin:end],
order=str("Atoms_id"))
def _set_mol_masses_and_charges(self):
"""
set the charge, mass and the atomic makeup for each molecule
"""
mol_config = [] # [ [atom id1, atom id2, ...], ... ]
mol_masses = [] # [ [atom mass1, atom mass2, ...], ... ]
# mol_charges = []
unique_atomic_masses = self.lammps_data.masses["mass"].values
mol_ids = self.lammps_data.atoms["molecule-ID"]
atom_ids = self.lammps_data.atoms.index
atomic_types = self.lammps_data.atoms["type"]
unique_mol_ids = np.unique(mol_ids)
atomic_masses = unique_atomic_masses[np.array(atomic_types) - 1]
self.nmols = unique_mol_ids.size
for umid in range(self.nmols):
mol_config.append(np.array(atom_ids)[np.where(mol_ids == umid + 1)] - 1)
mol_masses.append(atomic_masses[np.where(mol_ids == umid + 1)])
self.mol_config = np.array(mol_config)
self.mol_masses = np.array(mol_masses)
def _weighted_average(self, mol_id, mol_vector):
"""
Calculate the weighted average of the array comprising of
atomic vectors corresponding to the molecule with id mol_id.
Args:
mol_id (int): molecule id
mol_vector (numpy array): array of shape,
natoms_in_molecule with id mol_id x 3
Returns:
1D numpy array(3 x 1) of weighted averages in x, y, z directions
"""
mol_masses = self.mol_masses[mol_id]
return np.array(
[np.dot(mol_vector[:, dim], mol_masses) / np.sum(mol_masses)
for dim in range(3)])
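# For a hypothetical diatomic molecule with masses [1.0, 3.0] and
# mol_vector [[0, 0, 0], [4, 0, 0]], the weighted average is
# [(0 * 1.0 + 4 * 3.0) / 4.0, 0, 0] = [3.0, 0.0, 0.0].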
def _get_mol_vector(self, step, mol_id, param=("x", "y", "z")):
"""
Returns numpy array corresponding to atomic vectors of parameter
"param" for the given time step and molecule id
Args:
step (int): time step
mol_id (int): molecule id
param (list): the atomic parameter for which the weighted
average is to be computed
Returns:
2D numpy array(natoms_in_molecule x 3) of atomic vectors
"""
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][self.mol_config[mol_id]][param]
mol_vector = np.array(mol_vector_structured.tolist())
return mol_vector.copy()
# TODO: remove this and use only get_displacements(an order of magnitude faster)
def get_structures_from_trajectory(self):
"""
Convert the coordinates in each time step to a structure(boxed molecule).
Used to construct DiffusionAnalyzer object.
Returns:
list of Structure objects
"""
lattice = Lattice([[self.box_lengths[0], 0, 0],
[0, self.box_lengths[1], 0],
[0, 0, self.box_lengths[2]]])
structures = []
mass_to_symbol = dict(
(round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
unique_atomic_masses = self.lammps_data.masses["mass"].values
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][:][["x", "y", "z"]]
mol_vector = np.array(mol_vector_structured.tolist())
coords = mol_vector.copy()
species = [mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
for atype in self.trajectory[begin:end][:]["atom_type"]]
try:
structure = Structure(lattice, species, coords,
coords_are_cartesian=True)
except ValueError as error:
print("Error: '{}' at timestep {} in the trajectory".format(
error,
int(self.timesteps[step])))
continue
structures.append(structure)
return structures
def get_displacements(self):
"""
Return the initial structure and displacements for each time step.
Used to interface with the DiffusionAnalyzer.
Returns:
Structure object, numpy array of displacements
"""
lattice = Lattice([[self.box_lengths[0], 0, 0],
[0, self.box_lengths[1], 0],
[0, 0, self.box_lengths[2]]])
mass_to_symbol = dict(
(round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
unique_atomic_masses = self.lammps_data.masses["mass"].values
frac_coords = []
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][:][["x", "y", "z"]]
mol_vector = np.array(mol_vector_structured.tolist())
coords = mol_vector.copy()
if step == 0:
species = [
mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
for atype in self.trajectory[begin:end][:]["atom_type"]]
structure = Structure(lattice, species, coords,
coords_are_cartesian=True)
step_frac_coords = [lattice.get_fractional_coords(crd)
for crd in coords]
frac_coords.append(np.array(step_frac_coords)[:, None])
frac_coords = np.concatenate(frac_coords, axis=1)
dp = frac_coords[:, 1:] - frac_coords[:, :-1]
dp = dp - np.round(dp)
f_disp = np.cumsum(dp, axis=1)
disp = lattice.get_cartesian_coords(f_disp)
return structure, disp
def get_diffusion_analyzer(self, specie, temperature, time_step, step_skip,
**kwargs):
"""
Args:
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
For the other parameters please see the
pymatgen.analysis.diffusion_analyzer.DiffusionAnalyzer documentation.
Returns:
DiffusionAnalyzer
"""
# structures = self.get_structures_from_trajectory()
structure, disp = self.get_displacements()
return DiffusionAnalyzer(structure, disp, specie, temperature,
time_step, step_skip=step_skip,
**kwargs)
@property
def natoms(self):
return len(self.lammps_data.atoms)
@property
def box_lengths(self):
return [l[1] - l[0] for l in self.lammps_data.box.bounds]
@property
def traj_timesteps(self):
"""
trajectory time steps in time units.
e.g. for units = real, the time unit is femtoseconds (fs)
"""
return self.timesteps * self.log.timestep
@property
def mol_trajectory(self):
"""
Compute the weighted average trajectory of each molecule at each
timestep
Returns:
3D numpy array of shape (n_timesteps, nmols, 3)
"""
traj = []
for step in range(self.timesteps.size):
tmp_mol = []
for mol_id in range(self.nmols):
mol_coords = self._get_mol_vector(step, mol_id,
param=["x", "y", "z"])
# take care of periodic boundary conditions
pbc_wrap(mol_coords, self.box_lengths)
tmp_mol.append(self._weighted_average(mol_id, mol_coords))
traj.append(tmp_mol)
return np.array(traj)
@property
def mol_velocity(self):
"""
Compute the weighted average velocity of each molecule at each
timestep.
Returns:
3D numpy array of shape (n_timesteps, nmols, 3)
"""
velocity = []
for step in range(self.timesteps.size):
tmp_mol = []
for mol_id in range(self.nmols):
mol_velocities = self._get_mol_vector(step, mol_id,
param=["vx", "vy", "vz"])
tmp_mol.append(self._weighted_average(mol_id, mol_velocities))
velocity.append(tmp_mol)
return np.array(velocity)
def as_dict(self):
d = {}
skip = ["mol_velocity", "mol_trajectory"] # not applicable in general
attributes = [a for a in dir(self) if a not in skip and not a.startswith('__')]
attributes = [a for a in attributes if not callable(getattr(self, a))]
for attrib in attributes:
obj = getattr(self, attrib)
if isinstance(obj, MSONable):
d[attrib] = obj.as_dict()
elif isinstance(obj, np.ndarray):
d[attrib] = obj.tolist()
else:
d[attrib] = obj
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
# not really needed ?
@classmethod
def from_dict(cls, d):
return cls(data_file=d["data_file"], trajectory_file=d["trajectory_file"],
log_file=d["log_file"])
def pbc_wrap(array, box_lengths):
"""
wrap the array for molecule coordinates around the periodic boundary.
Args:
array (numpy.ndarray): molecule coordinates, [[x1,y1,z1],[x2,y2,z2],..]
box_lengths (list): [x_length, y_length, z_length]
"""
    for i in range(3):
        # reference each dimension to the first atom's coordinate in that
        # dimension, so the whole molecule is wrapped around its first atom
        ref = array[0, i]
        array[:, i] = np.where((array[:, i] - ref) >= box_lengths[i] / 2,
                               array[:, i] - box_lengths[i], array[:, i])
        array[:, i] = np.where((array[:, i] - ref) < -box_lengths[i] / 2,
                               array[:, i] + box_lengths[i], array[:, i])
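if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): wrap a 3-atom
    # molecule that straddles the boundary of a 10 x 10 x 10 box. After the
    # call, every atom lies within half a box length of the first atom along
    # each dimension.
    import numpy as np
    example_coords = np.array([[9.8, 0.1, 0.2],
                               [0.3, 0.2, 0.1],
                               [9.9, 9.9, 0.0]])
    pbc_wrap(example_coords, [10.0, 10.0, 10.0])
    print(example_coords)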
|
{
"content_hash": "638a6da2c6a581ff7144e8a01f890f4b",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 93,
"avg_line_length": 39.419064748201436,
"alnum_prop": 0.5308208240178857,
"repo_name": "nisse3000/pymatgen",
"id": "b3e073e502a6034b5110d50bc9e5ae0f1cea6b19",
"size": "22027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/lammps/output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6934548"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
import wx
from wx.lib.wordwrap import wordwrap
import wx.grid as gridlib
import os
import sys
__program__ = sys.modules['__main__'].__program__
__version__ = sys.modules['__main__'].__version__
import re
PTN_TITLE = re.compile('(?P<author>[^\[]+)[\]-](?P<title>.+)')
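# Hedged illustration (not in the original source): PTN_TITLE guesses author and
# title from a file name such as u"[Hong_Gildong] My_Novel" or
# u"Hong_Gildong - My_Novel"; in both cases group('author') captures
# "Hong_Gildong" (plus surrounding whitespace) and group('title') captures
# " My_Novel". loadFile() below strips the whitespace and replaces underscores
# with spaces.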
#--------------------------------------------------
class MyFrame(wx.Frame):
""" We simply derive a new class of Frame. """
def __init__(self, parent, title, config):
wx.Frame.__init__(self, parent, title=title)
panel = wx.Panel(self, wx.ID_ANY)
# default option
self.config = config
self.dirname = os.curdir
self.scrap = [] # scrap result
self.tmpldir = os.path.join("template", "epub2")
self.targetcss = "target/%s.css" % self.config['TargetCSS']
# init
self.SetScraper()
# top button bar
btn1 = wx.Button(panel, wx.ID_ANY, u"읽어오기") # Import
btn2 = wx.Button(panel, wx.ID_ANY, u"제거") # Remove
btn3 = wx.Button(panel, wx.ID_ANY, u"책정보읽기") # Scrap
btn4 = wx.Button(panel, wx.ID_ANY, u"변환") # Convert
btn5 = wx.Button(panel, wx.ID_ANY, u"설정") # Option
btn6 = wx.Button(panel, wx.ID_ANY, u"정보") # About
btn7 = wx.Button(panel, wx.ID_ANY, u"나가기") # Exit
self.Bind(wx.EVT_BUTTON, self.runImport, btn1)
self.Bind(wx.EVT_BUTTON, self.runRemove, btn2)
self.Bind(wx.EVT_BUTTON, self.runScrap, btn3)
self.Bind(wx.EVT_BUTTON, self.runConvert, btn4)
self.Bind(wx.EVT_BUTTON, self.OnOption, btn5)
self.Bind(wx.EVT_BUTTON, self.OnAbout, btn6)
self.Bind(wx.EVT_BUTTON, self.OnClose, btn7)
self.Bind(wx.EVT_CLOSE, self.OnExit)
btnszer1 = wx.BoxSizer(wx.HORIZONTAL)
for btn in [btn1, btn2, btn3, btn4, btn5, btn6, btn7]:
btnszer1.Add(btn, 1, wx.LEFT|wx.RIGHT, 2)
# second bar
#selbtn1 = wx.Button(panel, wx.ID_ANY, u"전체선택")
#selbtn2 = wx.Button(panel, wx.ID_ANY, u"전체취소")
#self.Bind(wx.EVT_BUTTON, self.SelectAll, selbtn1)
#self.Bind(wx.EVT_BUTTON, self.SelectNone, selbtn2)
# target directory
ddchk = wx.CheckBox(panel, wx.ID_ANY, u"출력위치")
ddchk.SetValue( self.config['UseDestDir'] )
dtb1 = wx.TextCtrl(panel, value=self.config['DestDir'])
ddbtn = wx.Button(panel, wx.ID_ANY, u"선택")
self.Bind(wx.EVT_CHECKBOX, self.OnUseDestDir, ddchk)
self.Bind(wx.EVT_BUTTON, self.OnDestDirSelect, ddbtn)
ddszer = wx.BoxSizer(wx.HORIZONTAL)
ddszer.Add(ddchk, 0, wx.CENTER|wx.LEFT, 5)
ddszer.Add(dtb1, 1, wx.GROW|wx.LEFT|wx.RIGHT, 2)
ddszer.Add(ddbtn, 0, wx.LEFT|wx.RIGHT, 2)
btnszer2 = wx.BoxSizer(wx.HORIZONTAL)
#btnszer2.Add(selbtn1, 0, wx.LEFT, 1)
#btnszer2.Add(selbtn2, 0, wx.LEFT, 1)
#btnszer2.Add( (20,10) )
btnszer2.Add(ddszer, 0, wx.RIGHT, 1)
# file table
self.grid = MyGrid(panel)
self.grid.SetDropTarget( FileDropTarget(self) );
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(btnszer1, 0, wx.GROW|wx.LEFT|wx.RIGHT, 5)
sizer.Add(btnszer2, 0, wx.GROW|wx.LEFT|wx.RIGHT, 5)
sizer.Add(self.grid, 1, wx.GROW|wx.ALL, 5)
self.panel = panel
self.dirbox = dtb1
panel.SetSizer(sizer)
sizer.Fit(self)
self.Show(True)
(h,w) = self.GetSize()
self.SetSize( (h,w+300) )
def SelectAll(self, evt):
for row in range(self.grid.table.GetNumberRows()):
self.grid.table.SetValue(row, 0, True)
def SelectNone(self, evt):
for row in range(self.grid.table.GetNumberRows()):
self.grid.table.SetValue(row, 0, False)
def runImport(self, evt):
        """Open one or more source text files via a multi-file dialog."""
dlg = wx.FileDialog(self, "Choose files", self.dirname, "", "*.txt;*.md", wx.FD_OPEN|wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
self.dirname = dlg.GetDirectory()
for fname in dlg.GetFilenames():
self.loadFile(fname)
dlg.Destroy()
def loadFile(self, fname):
import txtformat
from ftxt2markdown import extract_meta
newrow = self.grid.table.GetNumberRows()
self.scrap.append({'file':fname,'dir':self.dirname})
# add new row
self.grid.table.SetValue( newrow, 0, True )
self.grid.table.SetValue( newrow, 1, fname )
# 1) try to fetch directive inside
text = txtformat.load( os.path.join(self.dirname,fname) )
info = extract_meta(text)
if 'title' in info:
self.grid.table.SetValue( newrow, 2, info['title'] )
if 'author' in info:
self.grid.table.SetValue( newrow, 3, info['author'] )
if 'isbn' in info:
self.grid.table.SetValue( newrow, 4, info['isbn'] )
if 'cover_url' in info:
self.grid.table.SetValue( newrow, 5, info['cover_url'] )
# 2) [fallback] guess title & author from filename
if not 'title' in info and not 'author' in info:
fname2 = os.path.splitext(os.path.basename(fname))[0]
query = PTN_TITLE.search(fname2)
if query:
title = query.group('title').strip()
author = query.group('author').strip()
if title:
info['title'] = title.replace('_',' ')
if author:
info['author'] = author.replace('_',' ')
self.scrap[newrow]['info'] = info
def runRemove(self, evt):
cntl = range(self.grid.table.GetNumberRows())
cntl.reverse()
for row in cntl:
if self.grid.table.GetValue(row, 0):
self.scrap.pop(row)
self.grid.table.DeleteRow(row)
def runScrap(self, evt):
# get total count to do
cnt = 0
for row in range(self.grid.table.GetNumberRows()):
if self.grid.table.GetValue(row, 0): # selected
cnt += 1
if cnt == 0:
return
maxcnt = cnt
dlg = wx.ProgressDialog(u"Book Scrapping",
u"책정보 가져오기",
maximum = maxcnt,
parent = self,
style = wx.PD_CAN_ABORT
| wx.PD_AUTO_HIDE
| wx.PD_APP_MODAL
)
# scrapping
cnt = 0
keepGoing = True
for row in range(self.grid.table.GetNumberRows()):
if self.grid.table.GetValue(row, 0): # selected
fname = self.grid.table.GetValue(row, 1)
fname = os.path.splitext(os.path.split(fname)[1])[0]
title = self.grid.table.GetValue(row, 2)
isbn = self.grid.table.GetValue(row, 4)
# scrapping main
info = None
if isbn:
(keepGoing, skip) = dlg.Update(cnt, u"%s 검색중" % isbn)
if not keepGoing: break
info = self.scraper.fetch(isbn)
else:
keywd = title if title else fname.replace('_',' ')
(keepGoing, skip) = dlg.Update(cnt, u"%s 검색중" % keywd)
if not keepGoing: break
rslt = self.scraper.search( keywd, maxresult=1 )
if rslt:
info = rslt[0] # first result
if info is None:
info = self.scraper.default_value
elif self.config['TryHiresImage']:
import scraper.kyobo_scraper
img_url = scraper.kyobo_scraper.book_scraper().get_hires_image(info['isbn'])
if img_url:
print u"change cover_url to %s" % img_url
info['cover_url'] = img_url
# copy result
if self.scrap[row]['info'] is None:
self.scrap[row]['info'] = dict()
for key,val in info.items():
if not self.config['PreserveUserMeta'] or not key in self.scrap[row]['info']:
self.scrap[row]['info'][key] = val
# display
self.grid.table.SetValue(row, 2, self.scrap[row]['info']['title'])
self.grid.table.SetValue(row, 3, self.scrap[row]['info']['author'])
self.grid.table.SetValue(row, 4, self.scrap[row]['info']['isbn'])
self.grid.table.SetValue(row, 5, self.scrap[row]['info']['cover_url'])
cnt += 1
if keepGoing:
dlg.Update(cnt, u"완료")
else:
dlg.Update(maxcnt, u"취소")
def runConvert(self, evt):
import txtformat
from ftxt2markdown import insert_meta
# get total count to do
cnt = 0
for row in range(self.grid.table.GetNumberRows()):
if self.grid.table.GetValue(row, 0): # selected
cnt += 1
if cnt == 0:
return
maxcnt = cnt
dlg = wx.ProgressDialog(u"Book Conversion",
u"ePub 변환",
maximum = maxcnt,
parent = self,
style = wx.PD_CAN_ABORT
| wx.PD_APP_MODAL
)
# scrapping
cnt = 0
keepGoing = True
for row in range(self.grid.table.GetNumberRows()):
if self.grid.table.GetValue(row, 0): # selected
# load
txtfile = os.path.join( self.scrap[row]['dir'], self.scrap[row]['file'] )
text = txtformat.load(txtfile)
if self.config['ReformatText']:
correct_word_break = self.config['CorrectWordBreak']
if not correct_word_break:
correct_word_break = None
text = txtformat.clean(text,
correct_word_break=correct_word_break,
guess_chapter=self.config['GuessChapter'],
guess_parasep=self.config['GuessParaSep'])
info = self.scrap[row]['info']
info['title'] = self.grid.table.GetValue(row, 2)
if not info['title']: del info['title']
info['author'] = self.grid.table.GetValue(row, 3)
if not info['author']: del info['author']
info['isbn'] = self.grid.table.GetValue(row, 4)
if not info['isbn']: del info['isbn']
info['cover_url'] = self.grid.table.GetValue(row, 5)
if not info['cover_url']: del info['cover_url']
info['language'] = 'Korean'
atxt = insert_meta(text, info)
dlgtit = u'<미지정>'
if 'title' in info:
dlgtit = info['title']
(keepGoing, skip) = dlg.Update(cnt, u"%s 변환중" % dlgtit)
if not keepGoing: break
# output
if self.config['UseTitleInOutputName']:
filebase = info['title']
else:
filebase = os.path.splitext( os.path.basename(txtfile) )[0]
if filebase.endswith('.md'): # markdown notifier
filebase = filebase[:-3]
if self.config['UseDestDir']:
out_nex = os.path.join(self.config['DestDir'], filebase)
else:
out_nex = os.path.join(os.path.dirname(txtfile), filebase)
# generate
if self.config['OutputEPub']:
from markdown2epub import markdown2epub
epubfile = out_nex+'.epub'
markdown2epub(atxt, epubfile,
target_css = self.targetcss,
template_dir = self.tmpldir,
src_dir = os.path.dirname(txtfile),
fontfile=self.config['FontFile'],
tocLevel=self.config['MaxBrowseLevel'],
skipTo1st = self.config['SkipToFirstChapter'],
splitLargeText = self.config['SplitLargeText'],
graphicSeparator=self.config['GraphicSeparator'],
maxImageSize=(self.config['MaxImageWidth'], self.config['MaxImageHeight']) )
print u"%s is generated" % epubfile
if self.config['OutputMarkdown']:
open(out_nex+'.md.txt', 'w').write( atxt.encode('utf-8-sig') )
if self.config['OutputPDF']:
from markdown2pdf import markdown2pdf
pdffile = out_nex+'.pdf'
markdown2pdf(atxt, pdffile,
cssfile='xhtml2pdf.css',
fontfile=self.config['FontFile'],
src_dir = os.path.dirname(txtfile),
skipTo1st = self.config['SkipToFirstChapter'] )
print u"%s is generated" % pdffile
cnt += 1
if keepGoing:
dlg.Update(cnt, u"변환완료: %d개" % cnt)
else:
dlg.Update(maxcnt, u"변환취소: %d개" % cnt)
def OnOption(self, evt):
dlg = MyOption(self.panel, u"설정", self.config)
dlg.CenterOnScreen()
val = dlg.ShowModal()
if val == wx.ID_OK:
dlg.UpdateConfig(self.config)
self.SetScraper()
self.targetcss = "target/%s.css" % self.config['TargetCSS']
dlg.Destroy()
def OnAbout(self, evt):
info = wx.AboutDialogInfo()
info.Name = __program__
info.Version = __version__
info.Copyright = "(c) 2010, follows MIT License Policy"
info.Description = wordwrap(
u"한글 텍스트를 ePub으로 변환\n\n"
u"읽어오기, 책정보가져오기, 변환 순서로 누르면 ePub파일이 만들어 집니다.\n"
u"출력장치를 추가하려면 target/에 있는 css파일을 복사 편집하세요.\n",
350, wx.ClientDC(self))
info.WebSite = ("http://code.google.com/p/epubia", "Project Home")
info.Developers = [ "hojelei@gmail.com" ]
#info.License = wordwrap(licenseText, 500, wx.ClientDC(self))
# Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
def OnClose(self, evt):
self.Close(True)
def OnExit(self, evt):
self.Destroy()
def OnUseDestDir(self, evt):
self.config['UseDestDir'] = evt.IsChecked()
def OnDestDirSelect(self, evt):
dlg = wx.DirDialog(self, u"디렉토리 선택:", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.config['DestDir'] = dlg.GetPath()
self.dirbox.SetValue( self.config['DestDir'] )
dlg.Destroy()
def SetScraper(self):
if self.config['Scraper'] == 'Naver':
import scraper.naver_scraper
self.scraper = scraper.naver_scraper.book_scraper()
self.scraper.key = self.config['NaverAPIKey']
self.config['TryHiresImage'] = True
elif self.config['Scraper'] == 'Daum':
import scraper.daum_scraper
self.scraper = scraper.daum_scraper.book_scraper()
self.scraper.key = self.config['DaumAPIKey']
self.config['TryHiresImage'] = True
else:
import scraper.aladin_scraper
self.scraper = scraper.aladin_scraper.book_scraper()
self.config['TryHiresImage'] = False
#--------------------------------------------------
class MyDataTable(gridlib.PyGridTableBase):
def __init__(self):
gridlib.PyGridTableBase.__init__(self)
self.colLabels = [u"선택", u"파일이름", u"제목", u"저자", "ISBN", u"이미지"]
self.dataTypes = [gridlib.GRID_VALUE_BOOL,
gridlib.GRID_VALUE_STRING,
gridlib.GRID_VALUE_STRING,
gridlib.GRID_VALUE_STRING,
gridlib.GRID_VALUE_STRING,
gridlib.GRID_VALUE_STRING,
]
# array of heterogeneous array
self.data = []
# required methods
def GetNumberRows(self):
return len(self.data)
def GetNumberCols(self):
return len(self.colLabels)
def IsEmptyCell(self, row, col):
try:
return not self.data[row][col]
except IndexError:
return True
def GetValue(self, row, col):
try:
return self.data[row][col]
except IndexError:
return ''
def SetValue(self, row, col, value):
def innerSetValue(row, col, value):
try:
self.data[row][col] = value
newline = 0
except IndexError:
# add new
self.data.append([''] * self.GetNumberCols())
innerSetValue(row, col, value)
newline = 1
# notify grid
msg = gridlib.GridTableMessage(self,
gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED,
newline # how many
)
self.GetView().ProcessTableMessage(msg)
innerSetValue(row, col, value)
# optional methods
def GetColLabelValue(self, col):
return self.colLabels[col]
def GetTypeName(self, row, col):
return self.dataTypes[col]
def CanGetValueAs(self, row, col, typeName):
colType = self.dataTypes[col].split(':')[0]
if typeName == colType:
return True
else:
return False
def CanSetValueAs(self, row, col, typeName):
return self.CanGetValueAs(row, col, typeName)
# my own methods
def DeleteRow(self, row):
self.data.pop(row)
msg = gridlib.GridTableMessage(self,
gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED,
row, 1)
self.GetView().ProcessTableMessage(msg)
class MyGrid(gridlib.Grid):
def __init__(self, parent):
gridlib.Grid.__init__(self, parent, wx.ID_ANY)
self.table = MyDataTable()
self.SetTable(self.table, True)
self.SetRowLabelSize(0)
self.SetMargins(0,0)
self.AutoSizeColumns(False)
self.SetColSize(0, 5) # selection
self.SetColSize(1, 200) # file
self.SetColSize(2, 200) # title
self.SetColSize(3, 100) # author
self.SetColSize(4, 100) # isbn
self.SetColSize(5, 100) # cover_url
gridlib.EVT_GRID_CELL_LEFT_DCLICK(self, self.OnCellClick)
gridlib.EVT_GRID_LABEL_LEFT_CLICK(self, self.OnLabelClick)
def OnCellClick(self, evt):
if self.CanEnableCellControl():
self.EnableCellEditControl()
def OnLabelClick(self, evt):
c = evt.GetCol()
if c == 0: # select
num_row = self.table.GetNumberRows()
cnt = 0
for i in range(num_row):
if self.table.data[i][c]:
cnt += 1
if cnt == num_row:
forceval = False
else:
forceval = True
for i in range(num_row):
self.table.data[i][c] = forceval
# refresh
if num_row:
self.table.SetValue(0, c, forceval)
else:
# sort in column
pass # not yet
#--------------------------------------------------
class FileDropTarget(wx.FileDropTarget):
def __init__(self, obj):
wx.FileDropTarget.__init__(self)
self.obj = obj
def OnDropFiles(self, x, y, filenames):
for fname in filenames:
ext = os.path.splitext(fname)[1].lower()
if ext in ['.txt', '.md']:
self.obj.loadFile(fname)
elif ext in ['.jpg', '.jpeg', '.png', '.gif']:
for row, item in enumerate(self.obj.scrap):
item['info']['cover_url'] = fname
self.obj.grid.table.SetValue( row, 5, fname )
else:
print >> sys.stderr, u"ERROR: Unknown format, "+fname
#--------------------------------------------------
class MyOption(wx.Dialog):
""" Option Dialog """
def __init__(self, parent, title, config):
wx.Dialog.__init__(self, parent, title=title)
sizer = wx.BoxSizer( wx.VERTICAL )
mvs = wx.BoxSizer( wx.VERTICAL )
# Target CSS
box1_title = wx.StaticBox(self, wx.ID_ANY, u"출력장치")
box1 = wx.StaticBoxSizer(box1_title, wx.VERTICAL)
grid11 = wx.FlexGridSizer(0, 2, 0, 0)
grid12 = wx.FlexGridSizer(0, 2, 0, 0)
tgtlabel = wx.StaticText(self, wx.ID_ANY, u"장치설정")
targetList = []
import glob
for css in glob.glob("target/*.css"):
targetList.append( os.path.splitext(os.path.basename(css))[0] )
if not config['TargetCSS'] in targetList:
config['TargetCSS'] = targetList[0]
cb1 = wx.Choice(self, choices=targetList)
cb1.SetStringSelection(config['TargetCSS'])
grid11.Add( tgtlabel, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
grid11.Add( cb1, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
self.css_cb = cb1;
tgtlabel = wx.StaticText(self, wx.ID_ANY, u"글꼴")
targetList = []
import glob
for font in glob.glob("fonts/*.[ot]tf"):
targetList.append( os.path.basename(font) )
if not config['FontFile'] in targetList:
config['FontFile'] = targetList[0]
cb1 = wx.Choice(self, choices=targetList)
cb1.SetStringSelection(config['FontFile'])
grid12.Add( tgtlabel, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
grid12.Add( cb1, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
self.font_cb = cb1;
box1.Add( grid11, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
box1.Add( grid12, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
mvs.Add( box1, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
# Scraper Selection
box2_title = wx.StaticBox(self, wx.ID_ANY, u"책정보 사이트")
box2 = wx.StaticBoxSizer(box2_title, wx.VERTICAL)
grid2 = wx.FlexGridSizer(0, 2, 0, 0)
self.scrap_ctrls = []
radio1 = wx.RadioButton( self, wx.ID_ANY, "Aladin", style=wx.RB_GROUP )
radio2 = wx.RadioButton( self, wx.ID_ANY, "Naver" )
radio3 = wx.RadioButton( self, wx.ID_ANY, "Daum" )
text1 = wx.TextCtrl( self, wx.ID_ANY, '' )
text2 = wx.TextCtrl( self, wx.ID_ANY, config['NaverAPIKey'] )
text3 = wx.TextCtrl( self, wx.ID_ANY, config['DaumAPIKey'] )
self.scrap_ctrls.append( (radio1, text1) )
self.scrap_ctrls.append( (radio2, text2) )
self.scrap_ctrls.append( (radio3, text3) )
for radio, text in self.scrap_ctrls:
if radio.GetLabel() == config['Scraper']:
radio.SetValue(True)
else:
radio.SetValue(False)
for radio, text in self.scrap_ctrls:
grid2.Add( radio, 0, wx.ALIGN_CENTRE|wx.LEFT|wx.RIGHT|wx.TOP, 5 )
grid2.Add( text, 0, wx.ALIGN_CENTRE|wx.LEFT|wx.RIGHT|wx.TOP, 5 )
text.Hide() # not to allow user to modify key
box2.Add( grid2, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
mvs.Add( box2, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
sizer.Add(mvs, 0, wx.ALIGN_CENTRE_VERTICAL|wx.ALL, 5)
# Extra Control
box3_title = wx.StaticBox(self, wx.ID_ANY, u"기타")
box3 = wx.StaticBoxSizer(box3_title, wx.VERTICAL)
grid31 = wx.FlexGridSizer(0, 2, 0, 0)
label1 = wx.StaticText(self, wx.ID_ANY, u"단어분리 교정")
self.wordbreak_cb = wx.Choice(self, choices=['Disabled', 'Pattern', 'Naver Autospacing'])
self.wordbreak_cb.SetStringSelection(({'':'Disabled', 'pattern':'Pattern', 'naver_autospacing':'Naver Autospacing'})[config['CorrectWordBreak']])
grid31.Add( label1, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
grid31.Add( self.wordbreak_cb, 0, wx.ALIGN_CENTRE|wx.LEFT, 5 );
box3.Add( grid31, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
sizer.Add(box3, 0, wx.ALIGN_CENTER, 5)
# Output Format
outfmt_list = [ 'ePub', 'Markdown Text', 'PDF' ]
self.outfmt_lb = wx.CheckListBox(self, wx.ID_ANY, choices=outfmt_list, name=u"출력")
setval = []
if config['OutputEPub']:
setval.append( 0 )
if config['OutputMarkdown']:
setval.append( 1 )
if config['OutputPDF']:
setval.append( 2 )
self.outfmt_lb.SetChecked( setval )
sizer.Add(self.outfmt_lb, 0, wx.ALIGN_CENTER, 5)
# Ok & Cancel Button
btnsizer = wx.StdDialogButtonSizer()
# Ok button
btn = wx.Button(self, wx.ID_OK)
btn.SetDefault()
btnsizer.AddButton( btn )
# Cancel button
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton( btn )
btnsizer.Realize()
sizer.Add(btnsizer, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALL, 5)
self.SetSizer( sizer )
sizer.Fit( self )
for radio, text in self.scrap_ctrls:
self.Bind( wx.EVT_RADIOBUTTON, self.OnScraperSelect, radio )
def OnScraperSelect(self, evt):
radio_selected = evt.GetEventObject()
for radio, text in self.scrap_ctrls:
if radio == radio_selected:
text.Enable(True)
else:
text.Enable(False)
def UpdateConfig(self, config):
#
config['TargetCSS'] = self.css_cb.GetStringSelection()
config['FontFile'] = self.font_cb.GetStringSelection()
#
for radio, text in self.scrap_ctrls:
srvname = radio.GetLabel()
if radio.GetValue():
config['Scraper'] = srvname
if srvname in ['Naver', 'Daum']:
config['%sAPIKey' % srvname] = text.GetLabel()
#
sel = self.wordbreak_cb.GetCurrentSelection()
config['CorrectWordBreak'] = (['', 'pattern', 'naver_autospacing'])[sel]
#
config['OutputEPub'] = False
config['OutputPDF'] = False
config['OutputMarkdown'] = False
for str in self.outfmt_lb.GetCheckedStrings():
if str.lower() == 'epub':
config['OutputEPub'] = True
elif str.lower().startswith('markdown'):
config['OutputMarkdown'] = True
elif str.lower() == 'pdf':
config['OutputPDF'] = True
else:
print >> sys.stderr, "ERROR: Internal error on output format"
#--------------------------------------------------
def gui(config):
app = wx.App(False)
frame = MyFrame(None, __program__, config)
app.MainLoop()
# vim:ts=4:sw=4:et
|
{
"content_hash": "180408b6a880c87ab931d2583bc60966",
"timestamp": "",
"source": "github",
"line_count": 680,
"max_line_length": 153,
"avg_line_length": 40.84264705882353,
"alnum_prop": 0.5096316566449429,
"repo_name": "hojel/epubia",
"id": "f0eb25cff01dba7f646206ad6edc868a92cf7fa8",
"size": "28167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "84"
},
{
"name": "CSS",
"bytes": "8433"
},
{
"name": "HTML",
"bytes": "4762"
},
{
"name": "Python",
"bytes": "80005"
}
],
"symlink_target": ""
}
|
"""
A collection of various helper tools for PyChamberFlux
(c) 2016-2017 Wu Sun <wu.sun@ucla.edu>
"""
import math
def convert_unit_names(output_unit_list):
"""
A helper function to convert units to text representation.
Parameters
----------
output_unit_list : list of float
A list of the output units to parse.
Returns
-------
conc_unit_names : list of str
A list of concentration unit names.
flux_unit_names : list of str
A list of flux unit names.
"""
conc_unit_names = []
flux_unit_names = []
for output_unit in output_unit_list:
if math.isclose(output_unit, 1e-12):
conc_unit = 'pmol mol$^{-1}$'
flux_unit = 'pmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-9):
conc_unit = 'nmol mol$^{-1}$'
flux_unit = 'nmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-6):
            conc_unit = r'$\mu$mol mol$^{-1}$'
            flux_unit = r'$\mu$mol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-3):
conc_unit = 'mmol mol$^{-1}$'
flux_unit = 'mmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-2):
conc_unit = '%'
flux_unit = '% m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1.):
conc_unit = 'mol mol$^{-1}$'
flux_unit = 'mol m$^{-2}$ s$^{-1}$'
else:
conc_unit = 'undefined unit'
flux_unit = 'undefined unit'
conc_unit_names.append(conc_unit)
flux_unit_names.append(flux_unit)
return conc_unit_names, flux_unit_names
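# Hedged illustration (not in the original module): for example,
#     convert_unit_names([1e-9, 1e-6])
# returns (['nmol mol$^{-1}$', '$\mu$mol mol$^{-1}$'],
#          ['nmol m$^{-2}$ s$^{-1}$', '$\mu$mol m$^{-2}$ s$^{-1}$'])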
def create_output_header(data_type, species_list, biomet_var_list=[]):
"""
A helper function to create the header for output data frame.
Parameters
----------
data_type : str
The type of output dataframe
* 'flux' - flux data
* 'diag' - curve fitting diagnostics
species_list : list of str
List of gas species
biomet_var_list : list of str
List of biometeorological variable names
Returns
-------
header : list of str
Table header for the output dataframe. If `data_type` is illegal,
return a blank list.
"""
if data_type == 'flux':
header = ['doy_utc', 'doy_local', 'ch_no', 'ch_label', 'A_ch', 'V_ch']
for conc_suffix in ['_atmb', '_chb', '_cha', '_atma']:
header += [s + conc_suffix for s in species_list]
header += ['sd_' + s + conc_suffix for s in species_list]
header += [s + '_chc_iqr' for s in species_list]
for flux_method in ['_lin', '_rlin', '_nonlin']:
header += ['f' + s + flux_method for s in species_list]
header += ['se_f' + s + flux_method for s in species_list]
# add quality flags for fluxes
header += ['qc_' + s for s in species_list]
# add number of valid observations of concentrations
header += ['n_obs_' + s for s in species_list]
# biomet variables and other auxiliary variables
header += ['flow_lpm', 't_turnover', 't_lag_nom', 't_lag_optmz',
'status_tlag', 'pres', 'T_log', 'T_inst'] + biomet_var_list
elif data_type == 'diag':
header = ['doy_utc', 'doy_local', 'ch_no']
for s in species_list:
header += ['k_lin_' + s, 'b_lin_' + s, 'r_lin_' + s,
'p_lin_' + s, 'rmse_lin_' + s, 'delta_lin_' + s]
for s in species_list:
header += ['k_rlin_' + s, 'b_rlin_' + s,
'k_lolim_rlin_' + s, 'k_uplim_rlin_' + s,
'rmse_rlin_' + s, 'delta_rlin_' + s]
for s in species_list:
header += ['p0_nonlin_' + s, 'p1_nonlin_' + s,
'se_p0_nonlin_' + s, 'se_p1_nonlin_' + s,
'rmse_nonlin_' + s, 'delta_nonlin_' + s]
else:
return []
return header
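if __name__ == "__main__":
    # Hedged demo (not part of the original module): build a flux-table header
    # for two hypothetical species and one biomet variable, and show how many
    # columns it defines.
    demo_header = create_output_header(
        'flux', ['co2', 'h2o'], biomet_var_list=['T_soil'])
    print(len(demo_header), demo_header[:6])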
|
{
"content_hash": "ad28fe432703217063d60ee8a38190b3",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 34.1551724137931,
"alnum_prop": 0.5126198889449772,
"repo_name": "geoalchimista/chflux",
"id": "5633ea56ea75f09ff87318859dd3f629d7a333ff",
"size": "3962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chflux/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "143130"
},
{
"name": "Shell",
"bytes": "202"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_nmea_gps as sensorObj
def main():
# Instantiate a NMEA_GPS UBLOX based i2c sensor on i2c bus 0 at
# address 0x42
sensor = sensorObj.NMEAGPS(0, 0x42)
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# loop, dumping NMEA data out as fast as it comes in
while (True):
if (sensor.dataAvailable(0)):
sys.stdout.write(sensor.readStr(256))
else:
time.sleep(.1)
if __name__ == '__main__':
main()
|
{
"content_hash": "b54d2d046b72fcaab99eb2e0960291b3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 82,
"avg_line_length": 28,
"alnum_prop": 0.6450892857142857,
"repo_name": "whbruce/upm",
"id": "c4615f2fc8bd549ebe8f1a911eb70096a7ea51cc",
"size": "2055",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/python/nmea_gps_i2c.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2404615"
},
{
"name": "C++",
"bytes": "3118131"
},
{
"name": "CMake",
"bytes": "124373"
},
{
"name": "CSS",
"bytes": "18714"
},
{
"name": "HTML",
"bytes": "33016"
},
{
"name": "JavaScript",
"bytes": "47971"
},
{
"name": "Objective-C",
"bytes": "5854"
},
{
"name": "Python",
"bytes": "39398"
}
],
"symlink_target": ""
}
|
import hashlib
from messente.api.sms.api import verification_widget
def test_calculate_signature():
user = "test"
password = "test12"
api = verification_widget.VerificationWidgetAPI(
username=user,
password=password,
)
data = dict(
user=user,
callback_url="http://test.com",
version=1,
phone="+372123456789",
password=password
)
plain = "".join(map(lambda k: (k) + str(data[k]), sorted(data)))
s = (
"callback_urlhttp://test.compasswordtest12"
"phone+372123456789usertestversion1"
)
expected_md5sum = hashlib.md5(s.encode("utf-8")).hexdigest()
md5sum = api.calculate_signature(data)
assert plain == s
assert md5sum == expected_md5sum
assert api.verify_signature(md5sum, data)
assert not api.verify_signature("invalid", data)
def test_validate():
api = verification_widget.VerificationWidgetAPI()
(ok, errors) = api._validate({})
assert not ok
assert "callback_url" in errors
assert errors["callback_url"]
assert "version" in errors
assert errors["version"]
|
{
"content_hash": "215d14ebec906b3c307167153bb4349d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 28.743589743589745,
"alnum_prop": 0.6440677966101694,
"repo_name": "messente/messente-python",
"id": "ee45c7fdde7e9bef32bf56b70c2f3049ebe757c8",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_verification_widget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Python",
"bytes": "63470"
}
],
"symlink_target": ""
}
|
import numpy as np
from bokeh.io import curdoc, show
from bokeh.models import CircleDot, ColumnDataSource, Grid, LinearAxis, Plot
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
min_border=0, toolbar_location=None)
glyph = CircleDot(x="x", y="y", size="sizes", line_color="#dd1c77", fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
|
{
"content_hash": "b8f9bf348c0f612fa112d50d7231c9ce",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 23.774193548387096,
"alnum_prop": 0.7082767978290366,
"repo_name": "ericmjl/bokeh",
"id": "0ed8295bda4095ae571001d13626afbf3f30087f",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/reference/models/CircleDot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
def saturate_color(color):
red, green, blue = color
red = min(red, 255)
green = min(green, 255)
blue = min(blue, 255)
return red, green, blue
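if __name__ == "__main__":
    # Hedged demo (not part of the original listing): channels above 255 are
    # clamped, in-range values pass through unchanged.
    print(saturate_color((300, 128, 42)))  # expected: (255, 128, 42)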
|
{
"content_hash": "8b00e5b7ef30a1884bd23249bba6b2ea",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 29,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.5798816568047337,
"repo_name": "kevin-teddy/Beginning-Game-Development-with-Python-and-Pygame",
"id": "e92e4d0f2faea656f7a30e0af46975e53980a7a1",
"size": "169",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Chapter 4/4-4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "598735"
}
],
"symlink_target": ""
}
|
from nuage_horizon.dashboards.admin.networks.subnets import views # noqa
|
{
"content_hash": "5fc5b6990d69fca87f1ddb058b507193",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 73,
"avg_line_length": 74,
"alnum_prop": 0.8243243243243243,
"repo_name": "nuagenetworks/nuage-openstack-horizon",
"id": "36e45ecdbcae385b34055e317225e0c2af7d80aa",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuage_horizon/dashboards/admin/networks/subnets/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19041"
},
{
"name": "JavaScript",
"bytes": "22182"
},
{
"name": "Python",
"bytes": "116090"
},
{
"name": "Shell",
"bytes": "2748"
}
],
"symlink_target": ""
}
|
import requests
respuesta = requests.get("http://api.open-notify.org/astros.json")
data = respuesta.json()
print(data)
cantidad_astronautas = data["number"]
if cantidad_astronautas > 4:
    print("Party en el espacio")  # "Party in space"
else:
    print("Invita a la gente")  # "Invite some people"
cant_astro_iss = 0
for person in data['people']:
    if person['craft'] == 'ISS':
        cant_astro_iss += 1
if cant_astro_iss == cantidad_astronautas:
    print("El party es en el ISS")  # "The party is on the ISS"
#parametros = {"lat": 9.0, "lon": 75.25}
#respuesta = requests.get("http://api.open-notify.org/iss-pass.json", params=parametros)
#print(respuesta.content)
#data = respuesta.json()
#print(data)
#respuesta = requests.get("http://api.open-notify.org/iss-pass.json?lat=9.0&lon=79.25")
#print(respuesta.content)
|
{
"content_hash": "dd2692f3ec37f83084ddf6a553062c48",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 23.09090909090909,
"alnum_prop": 0.6850393700787402,
"repo_name": "FreddyGJ/Portafolio-de-prog-3",
"id": "2b5104dd0d1d8e6faa81d033a079d88bad1f1907",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Clase 14/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1548"
},
{
"name": "CSS",
"bytes": "495"
},
{
"name": "HTML",
"bytes": "38981"
},
{
"name": "JavaScript",
"bytes": "47"
},
{
"name": "Makefile",
"bytes": "1224"
},
{
"name": "Python",
"bytes": "33346"
}
],
"symlink_target": ""
}
|
"""Python API Bindings for the Monty server monitoring Go library"""
__version__ = "0.0.01"
__author__ = [
"Mek <m@hackerlist.net>"
]
__license__ = "Apache 2"
__contributors__ = "see AUTHORS"
from montypy.monty import Monty
|
{
"content_hash": "c8c4c25ff8063b70326a373af447788a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 22.9,
"alnum_prop": 0.6550218340611353,
"repo_name": "hackerlist/monty-python",
"id": "51a7f100628c6efd3b3854b34389cb3b36f21eac",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "montypy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3108"
}
],
"symlink_target": ""
}
|
import logging
from flask import Flask, jsonify, redirect
from flask_swagger import swagger
import os
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from datalake_api.v0 import v0
from datalake_api import settings
LOGGER = logging.getLogger(__name__)
app = Flask(__name__)
app.config.from_object(settings)
if 'DATALAKE_API_CONFIG' in os.environ:
app.config.from_envvar('DATALAKE_API_CONFIG')
app.register_blueprint(v0)
level = app.config.get('DATALAKE_API_LOG_LEVEL')
if level is not None and not app.debug:
logging.basicConfig(level=level)
logging.getLogger('boto3.resources.action').setLevel(logging.WARN)
sentry_sdk.init(integrations=[FlaskIntegration()])
@app.route('/')
def index():
return redirect("/docs/", code=302)
@app.route("/docs/")
def docs():
return redirect("/static/index.html", code=302)
@app.route("/spec/")
def spec():
swag = swagger(app)
swag['info']['version'] = "0"
swag['info']['title'] = "Datalake API"
swag['info']['description'] = 'Query files in the datalake archive'
return jsonify(swag)
@app.route('/health/')
def health():
return jsonify({})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
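# Hedged usage sketch (not part of the original module): the API can be exercised
# without a running server through Flask's test client, e.g.
#     client = app.test_client()
#     client.get('/health/')   # 200 with an empty JSON object
#     client.get('/spec/')     # 200 with the swagger description of the v0 API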
|
{
"content_hash": "830800a1784a5f32c9f24085441920a9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 22.30909090909091,
"alnum_prop": 0.6951915240423798,
"repo_name": "planetlabs/datalake",
"id": "1178095e55a2f34a3b7bdd5fcef1f67e3e4c759f",
"size": "1810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/datalake_api/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92755"
},
{
"name": "Dockerfile",
"bytes": "1269"
},
{
"name": "HTML",
"bytes": "5081"
},
{
"name": "JavaScript",
"bytes": "2446648"
},
{
"name": "Jinja",
"bytes": "704"
},
{
"name": "Makefile",
"bytes": "1055"
},
{
"name": "Python",
"bytes": "344092"
},
{
"name": "Shell",
"bytes": "850"
}
],
"symlink_target": ""
}
|
import copy
import os
import ConfigParser
import ooinstall.cli_installer as cli
from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
from mock import patch
MOCK_FACTS = {
'10.0.0.1': {
'common': {
'ip': '10.0.0.1',
'public_ip': '10.0.0.1',
'hostname': 'master-private.example.com',
'public_hostname': 'master.example.com'
}
},
'10.0.0.2': {
'common': {
'ip': '10.0.0.2',
'public_ip': '10.0.0.2',
'hostname': 'node1-private.example.com',
'public_hostname': 'node1.example.com'
}
},
'10.0.0.3': {
'common': {
'ip': '10.0.0.3',
'public_ip': '10.0.0.3',
'hostname': 'node2-private.example.com',
'public_hostname': 'node2.example.com'
}
},
}
MOCK_FACTS_QUICKHA = {
'10.0.0.1': {
'common': {
'ip': '10.0.0.1',
'public_ip': '10.0.0.1',
'hostname': 'master-private.example.com',
'public_hostname': 'master.example.com'
}
},
'10.0.0.2': {
'common': {
'ip': '10.0.0.2',
'public_ip': '10.0.0.2',
'hostname': 'node1-private.example.com',
'public_hostname': 'node1.example.com'
}
},
'10.0.0.3': {
'common': {
'ip': '10.0.0.3',
'public_ip': '10.0.0.3',
'hostname': 'node2-private.example.com',
'public_hostname': 'node2.example.com'
}
},
'10.0.0.4': {
'common': {
'ip': '10.0.0.4',
'public_ip': '10.0.0.4',
'hostname': 'proxy-private.example.com',
'public_hostname': 'proxy.example.com'
}
},
}
# Missing connect_to on some hosts:
BAD_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
"""
QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
master: true
- connect_to: 10.0.0.4
ip: 10.0.0.4
hostname: node3-private.example.com
public_ip: 24.222.0.4
public_hostname: node3.example.com
node: true
- connect_to: 10.0.0.5
ip: 10.0.0.5
hostname: proxy-private.example.com
public_ip: 24.222.0.5
public_hostname: proxy.example.com
master_lb: true
"""
QUICKHA_2_MASTER_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
master: true
node: true
- connect_to: 10.0.0.4
ip: 10.0.0.4
hostname: node3-private.example.com
public_ip: 24.222.0.4
public_hostname: node3.example.com
node: true
- connect_to: 10.0.0.5
ip: 10.0.0.5
hostname: proxy-private.example.com
public_ip: 24.222.0.5
public_hostname: proxy.example.com
master_lb: true
"""
QUICKHA_CONFIG_REUSED_LB = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
master: true
node: true
master_lb: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
master: true
"""
QUICKHA_CONFIG_NO_LB = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
master: true
"""
QUICKHA_CONFIG_PRECONFIGURED_LB = """
variant: %s
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
master: true
- connect_to: 10.0.0.4
ip: 10.0.0.4
hostname: node3-private.example.com
public_ip: 24.222.0.4
public_hostname: node3.example.com
node: true
- connect_to: proxy-private.example.com
hostname: proxy-private.example.com
public_hostname: proxy.example.com
master_lb: true
preconfigured: true
"""
class UnattendedCliTests(OOCliFixture):
def setUp(self):
OOCliFixture.setUp(self)
self.cli_args.append("-u")
# unattended with config file and all installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
if result.exception is None or result.exit_code != 1:
print "Exit code: %s" % result.exit_code
self.fail("Unexpected CLI return")
# unattended with config file and all installed hosts (with --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
cli_input=None,
exp_hosts_len=3,
exp_hosts_to_run_on_len=3,
force=True)
# unattended with config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
cli_input=None,
exp_hosts_len=3,
exp_hosts_to_run_on_len=3,
force=False)
# unattended with config file and no installed hosts (with --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
cli_input=None,
exp_hosts_len=3,
exp_hosts_to_run_on_len=3,
force=True)
# unattended with config file and some installed some uninstalled hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
cli_input=None,
exp_hosts_len=3,
exp_hosts_to_run_on_len=2,
force=False)
# unattended with config file and some installed some uninstalled hosts (with --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
cli_input=None,
exp_hosts_len=3,
exp_hosts_to_run_on_len=3,
force=True)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
load_facts_args = load_facts_mock.call_args[0]
self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
load_facts_args[0])
self.assertEquals(os.path.join(self.work_dir,
"playbooks/byo/openshift_facts.yml"), load_facts_args[1])
env_vars = load_facts_args[2]
self.assertEquals(os.path.join(self.work_dir,
'.ansible/callback_facts.yaml'),
env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
# If user running test has rpm installed, this might be set to default:
self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
hosts_to_run_on = run_playbook_mock.call_args[0][1]
self.assertEquals(3, len(hosts))
self.assertEquals(3, len(hosts_to_run_on))
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_inventory_write(self, load_facts_mock, run_playbook_mock):
# Add an ssh user so we can verify it makes it to the inventory file:
merged_config = "%s\n%s" % (SAMPLE_CONFIG % 'openshift-enterprise',
"ansible_ssh_user: bob")
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), merged_config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
# Check the inventory file looks as we would expect:
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('bob',
inventory.get('OSEv3:vars', 'ansible_ssh_user'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
# Check the masters:
self.assertEquals(1, len(inventory.items('masters')))
self.assertEquals(3, len(inventory.items('nodes')))
for item in inventory.items('masters'):
# ansible host lines do NOT parse nicely:
master_line = item[0]
if item[1] is not None:
master_line = "%s=%s" % (master_line, item[1])
self.assertTrue('openshift_ip' in master_line)
self.assertTrue('openshift_public_ip' in master_line)
self.assertTrue('openshift_hostname' in master_line)
self.assertTrue('openshift_public_hostname' in master_line)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_variant_version_latest_assumed(self, load_facts_mock,
run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# We didn't specify a version so the latest should have been assumed,
# and written to disk:
self.assertEquals('3.1', written_config['variant_version'])
# Make sure the correct value was passed to ansible:
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_variant_version_preserved(self, load_facts_mock,
run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config = SAMPLE_CONFIG % 'openshift-enterprise'
config = '%s\n%s' % (config, 'variant_version: 3.0')
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
        # Make sure our older version was preserved and written to disk:
self.assertEquals('3.0', written_config['variant_version'])
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@patch('ooinstall.openshift_ansible.run_ansible')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_ansible_mock.return_value = 0
config = SAMPLE_CONFIG % 'openshift-enterprise'
self._ansible_config_test(load_facts_mock, run_ansible_mock,
config, None, None)
@patch('ooinstall.openshift_ansible.run_ansible')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_ansible_mock.return_value = 0
config = SAMPLE_CONFIG % 'openshift-enterprise'
ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
self._ansible_config_test(load_facts_mock, run_ansible_mock,
config, ansible_config, ansible_config)
@patch('ooinstall.openshift_ansible.run_ansible')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ansible_config_specified_in_installer_config(self,
load_facts_mock, run_ansible_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_ansible_mock.return_value = 0
ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
config = SAMPLE_CONFIG % 'openshift-enterprise'
config = "%s\nansible_config: %s" % (config, ansible_config)
self._ansible_config_test(load_facts_mock, run_ansible_mock,
config, None, ansible_config)
#pylint: disable=too-many-arguments
# This method allows for drastically simpler tests to write, and the args
# are all useful.
def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
installer_config, ansible_config_cli=None, expected_result=None):
"""
Utility method for testing the ways you can specify the ansible config.
"""
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_ansible_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), installer_config)
self.cli_args.extend(["-c", config_file])
if ansible_config_cli:
self.cli_args.extend(["--ansible-config", ansible_config_cli])
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
# Test the env vars for facts playbook:
facts_env_vars = load_facts_mock.call_args[0][2]
if expected_result:
self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
else:
# If user running test has rpm installed, this might be set to default:
self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Test the env vars for main playbook:
env_vars = run_ansible_mock.call_args[0][2]
if expected_result:
self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
else:
# If user running test has rpm installed, this might be set to default:
self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_bad_config(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assertEquals(1, result.exit_code)
self.assertTrue("You must specify either an ip or hostname"
in result.output)
#unattended with three masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
hosts_to_run_on = run_playbook_mock.call_args[0][1]
self.assertEquals(5, len(hosts))
self.assertEquals(5, len(hosts_to_run_on))
#unattended with two masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
# This is an invalid config:
self.assert_result(result, 1)
self.assertTrue("A minimum of 3 Masters are required" in result.output)
#unattended with three masters, one node, but no load balancer specified:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
# This is not a valid input:
self.assert_result(result, 1)
self.assertTrue('No master load balancer specified in config' in result.output)
#unattended with three masters, one node, and one of the masters reused as load balancer:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
# This is not a valid configuration:
self.assert_result(result, 1)
#unattended with preconfigured lb
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
hosts_to_run_on = run_playbook_mock.call_args[0][1]
self.assertEquals(5, len(hosts))
self.assertEquals(5, len(hosts_to_run_on))
class AttendedCliTests(OOCliFixture):
def setUp(self):
OOCliFixture.setUp(self)
# Doesn't exist but keeps us from reading the local users config:
self.config_file = os.path.join(self.work_dir, 'config.yml')
self.cli_args.extend(["-c", self.config_file])
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
('10.0.0.3', False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('False',
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals(None,
inventory.get('nodes', '10.0.0.2'))
self.assertEquals(None,
inventory.get('nodes', '10.0.0.3'))
# interactive with config file and some installed some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_add_nodes(self, load_facts_mock, run_playbook_mock):
# Modify the mock facts to return a version indicating OpenShift
# is already installed on our master, and the first node.
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
],
add_nodes=[('10.0.0.3', False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 2)
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'),
SAMPLE_CONFIG % 'openshift-enterprise')
cli_input = build_input(confirm_facts='y')
self.cli_args.extend(["-c", config_file])
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
written_config = read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
#interactive with config file and all installed hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
mock_facts = copy.deepcopy(MOCK_FACTS)
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
cli_input = build_input(hosts=[
('10.0.0.1', True),
],
add_nodes=[('10.0.0.2', False)],
ssh_user='root',
variant_num=1,
schedulable_masters_ok=True,
confirm_facts='y')
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
run_playbook_mock,
cli_input,
exp_hosts_len=2,
exp_hosts_to_run_on_len=2,
force=False)
#interactive multimaster: one more node than master
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
('10.0.0.3', True),
('10.0.0.4', False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
master_lb=('10.0.0.5', False))
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 5, 5)
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 5)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('False',
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals('False',
inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
self.assertEquals('False',
inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
self.assertEquals(None,
inventory.get('nodes', '10.0.0.4'))
self.assertTrue(inventory.has_section('etcd'))
self.assertEquals(3, len(inventory.items('etcd')))
#interactive multimaster: identical masters and nodes
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
('10.0.0.3', True)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
master_lb=('10.0.0.5', False))
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 4, 4)
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('True',
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals('True',
inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
self.assertEquals('True',
inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
#interactive multimaster: attempting to use a master as the load balancer should fail:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
('10.0.0.3', False),
('10.0.0.4', True)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
master_lb=(['10.0.0.2', '10.0.0.5'], False))
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
#interactive all-in-one
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_all_in_one(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 1, 1)
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 1)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('True',
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
#interactive 3.0 install confirm no HA hints
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_hint(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
('10.0.0.1', True)],
ssh_user='root',
variant_num=2,
confirm_facts='y')
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self.assertTrue("NOTE: Add a total of 3 or more Masters to perform an HA installation."
not in result.output)
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes
# TODO: test with config file, attended reject facts
|
{
"content_hash": "239f911a07d8656d6ca1cace1b140f2b",
"timestamp": "",
"source": "github",
"line_count": 934,
"max_line_length": 98,
"avg_line_length": 40.33832976445396,
"alnum_prop": 0.5926584563117103,
"repo_name": "LutzLange/openshift-ansible",
"id": "ea380d5658cd47f909f033fa57fd8f91765f9010",
"size": "37874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/test/cli_installer_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "567358"
},
{
"name": "Ruby",
"bytes": "3270"
},
{
"name": "Shell",
"bytes": "30719"
},
{
"name": "VimL",
"bytes": "459"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.phpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CodeIgniter'
copyright = u'2014 - 2015, British Columbia Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.2'
# The full version, including alpha/beta/rc tags.
release = '3.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :php:func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. php:function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'ci'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Specifying a few options; just a starting point & we can play with it.
html_theme_options = {
}
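# A hypothetical sketch of options we might set later (names depend on the
# sphinx_rtd_theme version in use; left commented out on purpose):
# html_theme_options = {
#     'collapse_navigation': False,
#     'navigation_depth': 4,
# }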
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/ci-icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CodeIgniterdoc'
html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CodeIgniter.tex', u'CodeIgniter Documentation',
u'British Columbia Institute of Technology', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'codeigniter', u'CodeIgniter Documentation',
[u'British Columbia Institute of Technology'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'CodeIgniter'
epub_author = u'British Columbia Institute of Technology'
epub_publisher = u'British Columbia Institute of Technology'
epub_copyright = u'2014 - 2015, British Columbia Institute of Technology'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
{
"content_hash": "8038987463dd01b43f708083ae152500",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 80,
"avg_line_length": 32.496,
"alnum_prop": 0.7088872476612507,
"repo_name": "codergma/CodeIgniter",
"id": "c054490c31b2179890d7b043aec217e8df9768d8",
"size": "8546",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "user_guide_src/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "240"
},
{
"name": "CSS",
"bytes": "2486"
},
{
"name": "HTML",
"bytes": "27177"
},
{
"name": "JavaScript",
"bytes": "6398"
},
{
"name": "Makefile",
"bytes": "4614"
},
{
"name": "PHP",
"bytes": "2023679"
},
{
"name": "Python",
"bytes": "11583"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_link_aggregation_group
short_description: Manage Direct Connect LAG bundles.
description:
- Create, delete, or modify a Direct Connect link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect link aggregation group.
choices:
- present
- absent
name:
description:
- The name of the Direct Connect link aggregation group.
link_aggregation_group_id:
description:
- The ID of the Direct Connect link aggregation group.
num_connections:
description:
- The number of connections with which to initialize the link aggregation group.
min_links:
description:
- The minimum number of physical connections that must be operational for the LAG itself to be operational.
location:
description:
- The location of the link aggregation group.
bandwidth:
description:
- The bandwidth of the link aggregation group.
force_delete:
description:
- This allows the minimum number of links to be set to 0, any hosted connections disassociated,
and any virtual interfaces associated to the LAG deleted.
type: bool
connection_id:
description:
- A connection ID to link with the link aggregation group upon creation.
delete_with_disassociation:
description:
- To be used with I(state=absent) to delete connections after disassociating them with the LAG.
type: bool
wait:
description:
- Whether or not to wait for the operation to complete. May be useful when waiting for virtual interfaces
to be deleted. May modify the time of waiting with C(wait_timeout).
type: bool
wait_timeout:
description:
- The duration in seconds to wait if I(wait) is True.
default: 120
"""
EXAMPLES = """
# create a Direct Connect link aggregation group
- aws_direct_connect_link_aggregation_group:
    state: present
    location: EqDC2
    name: test-lag
    bandwidth: 1Gbps
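# Hypothetical sketch (not from the original module docs): delete the same LAG
# by ID, forcing disassociation of hosted connections and deletion of attached
# virtual interfaces; every parameter used here is defined in the options above.
- aws_direct_connect_link_aggregation_group:
    state: absent
    link_aggregation_group_id: dxlag-xxxxxxxx
    force_delete: true
    wait: true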
"""
RETURN = """
changed:
  type: bool
description: Whether or not the LAG has changed.
returned: always
aws_device:
type: str
description: The AWS Direct Connection endpoint that hosts the LAG.
sample: "EqSe2-1bwfvazist2k0"
returned: when I(state=present)
connections:
type: list
description: A list of connections bundled by this LAG.
sample:
"connections": [
{
"aws_device": "EqSe2-1bwfvazist2k0",
"bandwidth": "1Gbps",
"connection_id": "dxcon-fgzjah5a",
"connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
"connection_state": "down",
"lag_id": "dxlag-fgnsp4rq",
"location": "EqSe2",
"owner_account": "448830907657",
"region": "us-west-2"
}
]
returned: when I(state=present)
connections_bandwidth:
type: str
description: The individual bandwidth of the physical connections bundled by the LAG.
sample: "1Gbps"
returned: when I(state=present)
lag_id:
type: str
description: Unique identifier for the link aggregation group.
sample: "dxlag-fgnsp4rq"
returned: when I(state=present)
lag_name:
type: str
description: User-provided name for the link aggregation group.
returned: when I(state=present)
lag_state:
type: str
description: State of the LAG.
sample: "pending"
returned: when I(state=present)
location:
type: str
description: Where the connection is located.
sample: "EqSe2"
returned: when I(state=present)
minimum_links:
type: int
description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
returned: when I(state=present)
number_of_connections:
type: int
description: The number of physical connections bundled by the LAG.
returned: when I(state=present)
owner_account:
type: str
description: Owner account ID of the LAG.
returned: when I(state=present)
region:
type: str
description: The region in which the LAG exists.
returned: when I(state=present)
"""
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
get_aws_connection_info, boto3_conn, AWSRetry)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aws.direct_connect import (DirectConnectError,
delete_connection,
delete_virtual_interface,
disassociate_connection_and_lag)
import traceback
import time
try:
import botocore
except Exception:
pass
# handled by imported HAS_BOTO3
def lag_status(client, lag_id):
return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
def lag_exists(client, lag_id=None, lag_name=None, verify=True):
""" If verify=True, returns the LAG ID or None
If verify=False, returns the LAG's data (or an empty dict)
"""
try:
if lag_id:
response = client.describe_lags(lagId=lag_id)
else:
response = client.describe_lags()
except botocore.exceptions.ClientError as e:
if lag_id and verify:
return False
elif lag_id:
return {}
else:
failed_op = "Failed to describe DirectConnect link aggregation groups."
raise DirectConnectError(msg=failed_op,
last_traceback=traceback.format_exc(),
exception=e)
match = [] # List of LAG IDs that are exact matches
lag = [] # List of LAG data that are exact matches
# look for matching connections
if len(response.get('lags', [])) == 1 and lag_id:
if response['lags'][0]['lagState'] != 'deleted':
match.append(response['lags'][0]['lagId'])
lag.append(response['lags'][0])
else:
for each in response.get('lags', []):
if each['lagState'] != 'deleted':
if not lag_id:
if lag_name == each['lagName']:
match.append(each['lagId'])
else:
match.append(each['lagId'])
    # verifying that the LAG exists; if so, return its identifier, otherwise return False
if verify and len(match) == 1:
return match[0]
elif verify:
return False
    # not verifying existence; just return the current LAG info
else:
if len(lag) == 1:
return lag[0]
else:
return {}
def create_lag(client, num_connections, location, bandwidth, name, connection_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
last_traceback=None,
exception="")
parameters = dict(numberOfConnections=num_connections,
location=location,
connectionsBandwidth=bandwidth,
lagName=name)
if connection_id:
parameters.update(connectionId=connection_id)
try:
lag = client.create_lag(**parameters)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
last_traceback=traceback.format_exc(),
exception=e)
return lag['lagId']
def delete_lag(client, lag_id):
try:
client.delete_lag(lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
last_traceback=traceback.format_exc(),
exception=e)
@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
def _update_lag(client, lag_id, lag_name, min_links):
params = {}
if min_links:
params.update(minimumLinks=min_links)
if lag_name:
params.update(lagName=lag_name)
client.update_lag(lagId=lag_id, **params)
def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
start = time.time()
if min_links and min_links > num_connections:
raise DirectConnectError(
msg="The number of connections {0} must be greater than the minimum number of links "
"{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
last_traceback=None,
exception=None
)
while True:
try:
_update_lag(client, lag_id, lag_name, min_links)
except botocore.exceptions.ClientError as e:
if wait and time.time() - start <= wait_timeout:
continue
msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
raise DirectConnectError(msg=msg,
last_traceback=traceback.format_exc(),
exception=e)
else:
break
def lag_changed(current_status, name, min_links):
""" Determines if a modifiable link aggregation group attribute has been modified. """
return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
exists = lag_exists(client, lag_id, lag_name)
if not exists and lag_id:
raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
last_traceback=None,
exception="")
# the connection is found; get the latest state and see if it needs to be updated
if exists:
lag_id = exists
latest_state = lag_status(client, lag_id)
if lag_changed(latest_state, lag_name, min_links):
update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
return True, lag_id
return False, lag_id
# no connection found; create a new one
else:
lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
return True, lag_id
def describe_virtual_interfaces(client, lag_id):
try:
response = client.describe_virtual_interfaces(connectionId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
last_traceback=traceback.format_exc(),
exception=e)
return response.get('virtualInterfaces', [])
def get_connections_and_virtual_interfaces(client, lag_id):
virtual_interfaces = describe_virtual_interfaces(client, lag_id)
connections = lag_status(client, lag_id=lag_id).get('connections', [])
return virtual_interfaces, connections
def disassociate_vis(client, lag_id, virtual_interfaces):
for vi in virtual_interfaces:
delete_virtual_interface(client, vi['virtualInterfaceId'])
try:
response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId'])
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
lag_id = lag_exists(client, lag_id, lag_name)
if not lag_id:
return False
latest_status = lag_status(client, lag_id)
# determine the associated connections and virtual interfaces to disassociate
virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
# If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
"To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
"Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
"and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
last_traceback=None,
exception=None)
# update min_links to be 0 so we can remove the LAG
update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
# if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached
for connection in connections:
disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
if delete_with_disassociation:
delete_connection(client, connection['connectionId'])
for vi in virtual_interfaces:
delete_virtual_interface(client, vi['virtualInterfaceId'])
start_time = time.time()
    while True:
        try:
            delete_lag(client, lag_id)
        except DirectConnectError as e:
            if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait:
                continue
            else:
                return True
        else:
            # deletion succeeded; stop retrying and report the change
            return True
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(),
link_aggregation_group_id=dict(),
num_connections=dict(type='int'),
min_links=dict(type='int'),
location=dict(),
bandwidth=dict(),
connection_id=dict(),
delete_with_disassociation=dict(type='bool', default=False),
force_delete=dict(type='bool', default=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=120),
))
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[('link_aggregation_group_id', 'name')],
required_if=[('state', 'present', ('location', 'bandwidth'))])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='directconnect', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
state = module.params.get('state')
response = {}
try:
if state == 'present':
changed, lag_id = ensure_present(connection,
num_connections=module.params.get("num_connections"),
lag_id=module.params.get("link_aggregation_group_id"),
lag_name=module.params.get("name"),
location=module.params.get("location"),
bandwidth=module.params.get("bandwidth"),
connection_id=module.params.get("connection_id"),
min_links=module.params.get("min_links"),
wait=module.params.get("wait"),
wait_timeout=module.params.get("wait_timeout"))
response = lag_status(connection, lag_id)
elif state == "absent":
changed = ensure_absent(connection,
lag_id=module.params.get("link_aggregation_group_id"),
lag_name=module.params.get("name"),
force_delete=module.params.get("force_delete"),
delete_with_disassociation=module.params.get("delete_with_disassociation"),
wait=module.params.get('wait'),
wait_timeout=module.params.get('wait_timeout'))
except DirectConnectError as e:
if e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception))
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()
|
{
"content_hash": "d9659660af847ed1a57d6025360fdfbf",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 150,
"avg_line_length": 39.79203539823009,
"alnum_prop": 0.6150895140664961,
"repo_name": "thaim/ansible",
"id": "8d1d449ee3fbbc7954dcdae2baf3ff02a6104365",
"size": "18135",
"binary": false,
"copies": "8",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/amazon/aws_direct_connect_link_aggregation_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import (UserProfile, Service, Shift,
ServiceStatusSnapshot, StatusCheck, StatusCheckResult,
Instance, AlertAcknowledgement)
from .alert import AlertPluginUserData, AlertPlugin
admin.site.register(UserProfile)
admin.site.register(Shift)
admin.site.register(Service)
admin.site.register(ServiceStatusSnapshot)
admin.site.register(StatusCheck)
admin.site.register(StatusCheckResult)
admin.site.register(Instance)
admin.site.register(AlertPlugin)
admin.site.register(AlertPluginUserData)
admin.site.register(AlertAcknowledgement)
|
{
"content_hash": "1b5bacc85f8eb419447463e3d249f6ef",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 36.3125,
"alnum_prop": 0.8416523235800344,
"repo_name": "xinity/cabot",
"id": "63185903893783129600ff48a19d2af0fd0fa472",
"size": "581",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cabot/cabotapp/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21910"
},
{
"name": "HTML",
"bytes": "72733"
},
{
"name": "JavaScript",
"bytes": "368548"
},
{
"name": "Python",
"bytes": "359824"
},
{
"name": "Shell",
"bytes": "7109"
}
],
"symlink_target": ""
}
|
def create_screen(rows, columns):
'''
Creates a screen of rows x columns pixels
'''
grid = []
for row in range(rows):
grid.append([0] * columns)
return grid
# From lecture
def print_screen(screen):
''' Prints the screen to the console.
When a pixel == 0, then a '*' is displayed
When a pixel == 1, then a ' ' is displayed
'''
for row in range(len(screen)):
for col in range(len(screen[0])):
if screen[row][col] == 0:
print('*', end='')
else:
print(' ', end='')
print()
return
def invert_pixels(screen):
'''
(20 points)
Changes all pixels from 0 to 1 or 1 to 0
on the screen.
- Returns the updated screen.
'''
return "stub"
def fill_rect(ulrow,ulcol,lrrow,lrcol,screen):
'''
(20 points)
Draws (fills) a rectangle on the screen defined by
the upper-left (ulrow,ulcol) and lower-right points
(lrrow,lrcol) on the screen.
- Returns the updated screen.
- You may assume that these upper-left and lower-right
points are the correct types (integers) and within the
bounds of the screen dimensions.
'''
return "stub"
def draw_rect(ulrow,ulcol,lrrow,lrcol,screen):
'''
(20 points)
Draws only the outline of a rectangle of the screen defined by
the upper-left and lower-right points. It does not "fill"
the rectangle.
- Returns the updated screen.
- You may assume that these upper-left and lower-right
points are correct type (integers) and within the
bounds of the screen dimensions.
'''
return "stub"
def draw_line(row1,col1,row2,col2,screen):
'''
(40 points)
    Draws a line between two points ((row1,col1) is one point,
    (row2,col2) is the other point) on the screen.
- Return the updated screen.
- You may assume that these upper-left and lower-right
points are correct type (integers) and within the bounds of the
screen dimensions.
'''
return "stub"
# Testing your functionality visually by printing the screen
# should go in the if __name__=='__main__' condition.
# Recall that this code gets executed when executing the module directly.
# When lab08.py is imported, then this code will not execute.
if __name__=="__main__":
print("Run visual print screen tests here")
#screen = create_screen(20,20) # Creates a 20x20 screen
#print_screen(screen)
|
{
"content_hash": "c876c7e2b4defc5911b0212f94c7d357",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 73,
"avg_line_length": 28.709302325581394,
"alnum_prop": 0.6306196840826246,
"repo_name": "ucsb-cs8-s18/ucsb-cs8-s18.github.io",
"id": "ff9a8b5d9a38de015df0b29c75fee63dfd4d4746",
"size": "2586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_lab/lab08/lab08.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26757"
},
{
"name": "HTML",
"bytes": "31406"
},
{
"name": "JavaScript",
"bytes": "816117"
},
{
"name": "Python",
"bytes": "27655"
},
{
"name": "Ruby",
"bytes": "6911"
},
{
"name": "Shell",
"bytes": "1116"
}
],
"symlink_target": ""
}
|
"""
Forms for test execution.
"""
import json
from django.core.exceptions import ValidationError, ObjectDoesNotExist
import floppyforms.__future__ as forms
from ... import model
class EnvironmentSelectionForm(forms.Form):
"""Form for selecting an environment."""
def __init__(self, *args, **kwargs):
"""Accepts ``environments`` queryset and ``current`` env id."""
environments = kwargs.pop("environments", [])
current = kwargs.pop("current", None)
super(EnvironmentSelectionForm, self).__init__(*args, **kwargs)
# list of categories, ordered by name
self.categories = []
# maps category to set of elements
self.elements_by_category = {}
# maps environment ID to list of element IDs, ordered by category
self.elementids_by_envid = {}
# elements in current environment
current_elements = []
env_element_through_model = model.Environment.elements.through
env_element_relationships = env_element_through_model.objects.filter(
environment__in=environments).select_related()
# first construct the ordered list of categories (and current elements)
cat_set = set()
for ee in env_element_relationships:
cat_set.add(ee.element.category)
if ee.environment.id == current:
current_elements.append(ee.element)
self.categories = sorted(cat_set, key=lambda c: c.name)
num_categories = len(self.categories)
# populate elements by category and environment
for ee in env_element_relationships:
byenv = self.elementids_by_envid.setdefault(
ee.environment.id, [None] * num_categories)
category_index = self.categories.index(ee.element.category)
byenv[category_index] = ee.element.id
bycat = self.elements_by_category.setdefault(
ee.element.category, set())
bycat.add(ee.element)
# construct choice-field for each env type
for category in self.categories:
self.fields["category_{0}".format(category.id)] = forms.ChoiceField(
choices=[("", "---------")] + [
(e.id, e.name) for e in sorted(
self.elements_by_category[category],
key=lambda e: e.name)
],
label=category.name,
required=False)
# set initial data based on current user environment
for element in current_elements:
field_name = "category_{0}".format(element.category.id)
self.initial[field_name] = element.id
def clean(self):
"""Validate that selected elements form valid environment."""
# only act on category_ items. There may be other fields
# like "build" in here if a run series is being executed.
selected_element_ids = set(
[int(eid) for k, eid in self.cleaned_data.iteritems()
if k.find("category_") == 0 and eid])
matches = [
envid for envid, element_ids in self.elementids_by_envid.items()
if set([e for e in element_ids if e]).issubset(selected_element_ids)
]
if not matches:
raise forms.ValidationError(
"The selected environment is not valid for this test run. "
"Please select a different combination.")
self.cleaned_data["environment"] = matches[0]
return self.cleaned_data
def save(self):
"""Return selected environment ID."""
return self.cleaned_data["environment"]
def valid_environments_json(self):
"""Return lists of element IDs representing valid envs, as JSON."""
return json.dumps(self.elementids_by_envid.values())
class EnvironmentBuildSelectionForm(EnvironmentSelectionForm):
"""
Form to select your environment and specify a build.
This is if the user is running a Run that is a series. If so, then it
prompts for a build number::
1. If the clone of this run with that build number already exists,
Then execute that run with the specified env.
2. If it does not exist, then clone this run, set the build field
and execute it with the env specified.
"""
build = forms.CharField(max_length=200, required=False)
fields = ["build"]
def __init__(self, *args, **kwargs):
self.run = kwargs.pop("run", None)
self.build = kwargs.pop("build", None)
self.user = kwargs.pop("user", None)
super(EnvironmentBuildSelectionForm, self).__init__(*args, **kwargs)
def clean_build(self):
"""
Check that the build value is set.
"""
if not self.cleaned_data["build"]:
raise ValidationError("You must specify a build to test.")
return self.cleaned_data["build"]
def save(self):
"""Find the run with this build, or create a new one."""
try:
this_run = model.Run.objects.get(
series=self.run,
build=self.cleaned_data["build"],
)
except ObjectDoesNotExist:
this_run = self.run.clone_for_series(
build=self.cleaned_data["build"],
user=self.user,
)
this_run.activate()
# now we need to return this new run as the one to be executed.
return super(EnvironmentBuildSelectionForm, self).save(), this_run.id
|
{
"content_hash": "3ae1f2a10af7b0749dafa2a2bb6cbfdb",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 80,
"avg_line_length": 36.05844155844156,
"alnum_prop": 0.6000360165676211,
"repo_name": "shinglyu/moztrap",
"id": "a3f48bb280418999419e8c2d46e59fd6e7c7d9a2",
"size": "5553",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "moztrap/view/runtests/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "713042"
},
{
"name": "HTML",
"bytes": "1056025"
},
{
"name": "JavaScript",
"bytes": "270285"
},
{
"name": "Python",
"bytes": "2088749"
},
{
"name": "Ruby",
"bytes": "464"
},
{
"name": "Shell",
"bytes": "867"
}
],
"symlink_target": ""
}
|
from framework.core.registry import plugin_registry
from framework.core.util import extract_artifact_id, listify_duplicate_keys
from framework.types import Artifact, Parameterized, Primitive
from framework.types.parameterized import List, ChooseMany
import framework.db as db
import datetime
class Executor(object):
def __init__(self, job):
self.job = job
def __call__(self):
method_uri = self.job.workflow.template # TODO currently the template is just the method
method = plugin_registry.get_plugin(method_uri).get_method(method_uri)
study = self.job.study.id
inputs = listify_duplicate_keys(self.job.inputs)
results = method(study, **inputs)
for i, (result, output) in enumerate(zip(results, method.outputs)):
ordered_result = traverse_result_and_record(result, output)
db.JobOutput(job=self.job, order=i, result=ordered_result).save()
self.job.status = 'completed'
self.job.completed = datetime.datetime.now()
self.job.save()
def traverse_result_and_record(result, type_, order=0, parent=None):
    """Recursively walk *result* according to *type_*, saving each value as an
    OrderedResult row and preserving order and parent/child nesting."""
if issubclass(type_, Artifact):
ordered_result = db.OrderedResult(order=order,
parent=parent,
artifact=db.ArtifactProxy.get(db.ArtifactProxy.id == extract_artifact_id(result)))
ordered_result.save()
return ordered_result
if issubclass(type_, Primitive):
ordered_result = db.OrderedResult(order=order,
parent=parent,
primitive=result)
ordered_result.save()
return ordered_result
if type_.name == 'List' or type_.name == 'ChooseMany':
parent = db.OrderedResult(order=order, parent=parent)
parent.save()
for i, r in enumerate(result):
traverse_result_and_record(r, type_.subtype, order=i, parent=parent)
return parent
return traverse_result_and_record(result, type_.subtype, order=order, parent=parent)
|
{
"content_hash": "cc8fa5c83a71b647651e5f4f315804bb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 124,
"avg_line_length": 41.11764705882353,
"alnum_prop": 0.631378159275155,
"repo_name": "biocore/metoo",
"id": "4cc1b0899c0a2212d8317ef79216609092d25619",
"size": "2097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/core/executor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "18148"
},
{
"name": "JavaScript",
"bytes": "12170"
},
{
"name": "Python",
"bytes": "52718"
},
{
"name": "Shell",
"bytes": "4763"
}
],
"symlink_target": ""
}
|
"""Huawei LTE constants."""
DOMAIN = "huawei_lte"
CONF_TRACK_WIRED_CLIENTS = "track_wired_clients"
DEFAULT_DEVICE_NAME = "LTE"
DEFAULT_NOTIFY_SERVICE_NAME = DOMAIN
DEFAULT_TRACK_WIRED_CLIENTS = True
UPDATE_SIGNAL = f"{DOMAIN}_update"
CONNECTION_TIMEOUT = 10
NOTIFY_SUPPRESS_TIMEOUT = 30
SERVICE_CLEAR_TRAFFIC_STATISTICS = "clear_traffic_statistics"
SERVICE_REBOOT = "reboot"
SERVICE_RESUME_INTEGRATION = "resume_integration"
SERVICE_SUSPEND_INTEGRATION = "suspend_integration"
ADMIN_SERVICES = {
SERVICE_CLEAR_TRAFFIC_STATISTICS,
SERVICE_REBOOT,
SERVICE_RESUME_INTEGRATION,
SERVICE_SUSPEND_INTEGRATION,
}
KEY_DEVICE_BASIC_INFORMATION = "device_basic_information"
KEY_DEVICE_INFORMATION = "device_information"
KEY_DEVICE_SIGNAL = "device_signal"
KEY_DIALUP_MOBILE_DATASWITCH = "dialup_mobile_dataswitch"
KEY_LAN_HOST_INFO = "lan_host_info"
KEY_MONITORING_CHECK_NOTIFICATIONS = "monitoring_check_notifications"
KEY_MONITORING_MONTH_STATISTICS = "monitoring_month_statistics"
KEY_MONITORING_STATUS = "monitoring_status"
KEY_MONITORING_TRAFFIC_STATISTICS = "monitoring_traffic_statistics"
KEY_NET_CURRENT_PLMN = "net_current_plmn"
KEY_NET_NET_MODE = "net_net_mode"
KEY_SMS_SMS_COUNT = "sms_sms_count"
KEY_WLAN_HOST_LIST = "wlan_host_list"
KEY_WLAN_WIFI_FEATURE_SWITCH = "wlan_wifi_feature_switch"
BINARY_SENSOR_KEYS = {
KEY_MONITORING_CHECK_NOTIFICATIONS,
KEY_MONITORING_STATUS,
KEY_WLAN_WIFI_FEATURE_SWITCH,
}
DEVICE_TRACKER_KEYS = {
KEY_LAN_HOST_INFO,
KEY_WLAN_HOST_LIST,
}
SENSOR_KEYS = {
KEY_DEVICE_INFORMATION,
KEY_DEVICE_SIGNAL,
KEY_MONITORING_CHECK_NOTIFICATIONS,
KEY_MONITORING_MONTH_STATISTICS,
KEY_MONITORING_STATUS,
KEY_MONITORING_TRAFFIC_STATISTICS,
KEY_NET_CURRENT_PLMN,
KEY_NET_NET_MODE,
KEY_SMS_SMS_COUNT,
}
SWITCH_KEYS = {KEY_DIALUP_MOBILE_DATASWITCH}
ALL_KEYS = (
BINARY_SENSOR_KEYS
| DEVICE_TRACKER_KEYS
| SENSOR_KEYS
| SWITCH_KEYS
| {KEY_DEVICE_BASIC_INFORMATION}
)
|
{
"content_hash": "af5676c46ae768b33e0b4d8695c80dd6",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 69,
"avg_line_length": 26.83783783783784,
"alnum_prop": 0.7376636455186304,
"repo_name": "kennedyshead/home-assistant",
"id": "7e34b3dbd160dd98068b62488a5d957c3b139b8b",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/huawei_lte/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
queries = {
"dbtype": "mysql",
"column": {
"head": "select {column} from {schema}.{table} limit {n};",
"all": "select {column} from {schema}.{table};",
"unique": "select distinct {column} from {schema}.{table};",
"sample": "select {column} from {schema}.{table} order by rand() limit {n};"
},
"table": {
"select": "select {columns} from {schema}.{table};",
"head": "select * from {schema}.{table} limit {n};",
"all": "select * from {schema}.{table};",
"unique": "select distinct {columns} from {schema}.{table};",
"sample": "select * from {schema}.{table} order by rand() limit {n};"
},
"system": {
"schema_no_system": """
select table_schema
, table_name
, column_name
, data_type
from
information_schema.columns
where
table_schema not in ('information_schema', 'performance_schema', 'mysql')
""",
"schema_with_system": """
select table_schema
, table_name
, column_name
, data_type
from
information_schema.columns;
""",
"schema_specified": """
select table_schema
, table_name
, column_name
, udt_name
from
information_schema.columns
where table_schema in (%s);
""",
"foreign_keys_for_table": """
select column_name
, referenced_table_schema
, referenced_table_name
, referenced_column_name
from
information_schema.key_column_usage
where
table_name = '{table}'
and referenced_column_name IS NOT NULL
and table_schema = '{table_schema}';
""",
"foreign_keys_for_column": """
select column_name
, referenced_table_schema
, referenced_table_name
, referenced_column_name
from
information_schema.key_column_usage
where
table_name = '{table}'
and column_name = '{column}'
and referenced_column_name IS NOT NULL
and table_schema = '{table_schema}';
""",
"ref_keys_for_table": """
select referenced_column_name
, table_schema
, table_name
, column_name
from
information_schema.key_column_usage
where
referenced_table_name = '{table}'
and referenced_column_name IS NOT NULL
and table_schema = '{table_schema}';
""",
"foreign_keys_for_db": """
select column_name
, referenced_table_schema
, referenced_table_name
, referenced_column_name
FROM
information_schema.key_column_usage
WHERE referenced_column_name IS NOT NULL;
""",
"ref_keys_for_db": """
SELECT referenced_column_name,
table_schema,
table_name,
column_name
FROM
information_schema.key_column_usage
WHERE referenced_column_name IS NOT NULL;
"""
}
}
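# Hypothetical usage sketch (not part of db.py itself): each entry is a plain
# str.format() template, e.g.
#     queries["column"]["head"].format(column="id", schema="mydb", table="users", n=10)
# renders to:
#     "select id from mydb.users limit 10;"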
|
{
"content_hash": "94554d808d8d05f4deb3fdd0a977cd4b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 93,
"avg_line_length": 35.26,
"alnum_prop": 0.46199659671015314,
"repo_name": "yhat/db.py",
"id": "05f3c9b63752a0b71a6b00fcb13905af48f2ff23",
"size": "3526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/queries/mysql.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1265"
},
{
"name": "Python",
"bytes": "98549"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
}
|
"""Demo fan platform that has a fake fan."""
from __future__ import annotations
from homeassistant.components.fan import (
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_PRESET_MODE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
PRESET_MODE_AUTO = "auto"
PRESET_MODE_SMART = "smart"
PRESET_MODE_SLEEP = "sleep"
PRESET_MODE_ON = "on"
FULL_SUPPORT = SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION
LIMITED_SUPPORT = SUPPORT_SET_SPEED
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the demo fan platform."""
async_add_entities(
[
DemoPercentageFan(
hass,
"fan1",
"Living Room Fan",
FULL_SUPPORT,
[
PRESET_MODE_AUTO,
PRESET_MODE_SMART,
PRESET_MODE_SLEEP,
PRESET_MODE_ON,
],
),
DemoPercentageFan(
hass,
"fan2",
"Ceiling Fan",
LIMITED_SUPPORT,
None,
),
AsyncDemoPercentageFan(
hass,
"fan3",
"Percentage Full Fan",
FULL_SUPPORT,
[
PRESET_MODE_AUTO,
PRESET_MODE_SMART,
PRESET_MODE_SLEEP,
PRESET_MODE_ON,
],
),
DemoPercentageFan(
hass,
"fan4",
"Percentage Limited Fan",
LIMITED_SUPPORT,
[
PRESET_MODE_AUTO,
PRESET_MODE_SMART,
PRESET_MODE_SLEEP,
PRESET_MODE_ON,
],
),
AsyncDemoPercentageFan(
hass,
"fan5",
"Preset Only Limited Fan",
SUPPORT_PRESET_MODE,
[
PRESET_MODE_AUTO,
PRESET_MODE_SMART,
PRESET_MODE_SLEEP,
PRESET_MODE_ON,
],
),
]
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class BaseDemoFan(FanEntity):
"""A demonstration fan component that uses legacy fan speeds."""
def __init__(
self,
hass,
unique_id: str,
name: str,
supported_features: int,
preset_modes: list[str] | None,
) -> None:
"""Initialize the entity."""
self.hass = hass
self._unique_id = unique_id
self._supported_features = supported_features
self._percentage: int | None = None
self._preset_modes = preset_modes
self._preset_mode: str | None = None
self._oscillating: bool | None = None
self._direction: str | None = None
self._name = name
if supported_features & SUPPORT_OSCILLATE:
self._oscillating = False
if supported_features & SUPPORT_DIRECTION:
self._direction = "forward"
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self) -> str:
"""Get entity name."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo fan."""
return False
@property
def current_direction(self) -> str | None:
"""Fan direction."""
return self._direction
@property
def oscillating(self) -> bool | None:
"""Oscillating."""
return self._oscillating
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
class DemoPercentageFan(BaseDemoFan, FanEntity):
"""A demonstration fan component that uses percentages."""
@property
def percentage(self) -> int | None:
"""Return the current speed."""
return self._percentage
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return 3
def set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
self._percentage = percentage
self._preset_mode = None
self.schedule_update_ha_state()
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., auto, smart, interval, favorite."""
return self._preset_mode
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes."""
return self._preset_modes
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self.preset_modes and preset_mode in self.preset_modes:
self._preset_mode = preset_mode
self._percentage = None
self.schedule_update_ha_state()
else:
raise ValueError(f"Invalid preset mode: {preset_mode}")
def turn_on(
self,
percentage: int = None,
preset_mode: str = None,
**kwargs,
) -> None:
"""Turn on the entity."""
if preset_mode:
self.set_preset_mode(preset_mode)
return
if percentage is None:
percentage = 67
self.set_percentage(percentage)
def turn_off(self, **kwargs) -> None:
"""Turn off the entity."""
self.set_percentage(0)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self._direction = direction
self.schedule_update_ha_state()
def oscillate(self, oscillating: bool) -> None:
"""Set oscillation."""
self._oscillating = oscillating
self.schedule_update_ha_state()
class AsyncDemoPercentageFan(BaseDemoFan, FanEntity):
"""An async demonstration fan component that uses percentages."""
@property
def percentage(self) -> int | None:
"""Return the current speed."""
return self._percentage
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return 3
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
self._percentage = percentage
self._preset_mode = None
self.async_write_ha_state()
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., auto, smart, interval, favorite."""
return self._preset_mode
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes."""
return self._preset_modes
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self.preset_modes is None or preset_mode not in self.preset_modes:
raise ValueError(
"{preset_mode} is not a valid preset_mode: {self.preset_modes}"
)
self._preset_mode = preset_mode
self._percentage = None
self.async_write_ha_state()
async def async_turn_on(
self,
        percentage: int | None = None,
        preset_mode: str | None = None,
**kwargs,
) -> None:
"""Turn on the entity."""
if preset_mode:
await self.async_set_preset_mode(preset_mode)
return
if percentage is None:
percentage = 67
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity."""
await self.async_oscillate(False)
await self.async_set_percentage(0)
async def async_set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self._direction = direction
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation."""
self._oscillating = oscillating
self.async_write_ha_state()
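# --- Illustrative sketch, not part of the demo platform above ---------------
# The demo fans report speed_count = 3 and drive everything through
# percentages. The helpers below show one way such a three-step fan could
# translate between named speeds and the percentage handed to
# set_percentage(), including the 67% default that turn_on() falls back to.
# The speed names are made-up examples, not Home Assistant constants.
ORDERED_SPEEDS = ["low", "medium", "high"]
def speed_to_percentage(speed: str) -> int:
    """Map a named speed to a percentage for a fan with three steps."""
    step = 100 / len(ORDERED_SPEEDS)
    return round(step * (ORDERED_SPEEDS.index(speed) + 1))
def percentage_to_speed(percentage: int) -> str | None:
    """Map a percentage back to the closest named speed (None when off)."""
    if percentage == 0:
        return None
    step = 100 / len(ORDERED_SPEEDS)
    index = min(int((percentage - 1) // step), len(ORDERED_SPEEDS) - 1)
    return ORDERED_SPEEDS[index]
assert speed_to_percentage("medium") == 67  # matches the turn_on() default above
assert percentage_to_speed(67) == "medium"
assert percentage_to_speed(0) is None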
|
{
"content_hash": "ebb96847dc8013c148a3f2e4ad69b131",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 84,
"avg_line_length": 29.45608108108108,
"alnum_prop": 0.5543066865466223,
"repo_name": "GenericStudent/home-assistant",
"id": "8fcc6a810ed19768b1dcd906bc41ac8dacf4f73d",
"size": "8719",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/demo/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
def fetch_environment(file_id, files=None, module_names=None):
"""Return module environment dictionary from *file_id*.
*file_id* represent the identifier of the file.
*files* is an optional list of the other file names stored in the same
directory as the one analyzed.
*module_names* is an optional list of all the other module name
previously fetched to help determine the module name of the current
file.
The environment is in the form of::
{
"id": "module.test",
"name": test,
"file_id": "module/test/index.js"
}
"""
if module_names is None:
module_names = []
hierarchy = file_id.split("/")
file_name = hierarchy.pop()
if file_name == "index.js":
module_id = ".".join(hierarchy)
module_name = _guess_module_name(
hierarchy[-1],
hierarchy_folders=hierarchy[:-1],
module_names=module_names
)
elif "index.js" in files:
name = file_name.split(".js")[0]
module_id = ".".join(hierarchy + [name])
module_name = _guess_module_name(
".".join([hierarchy[-1], name]),
hierarchy_folders=hierarchy[:-1],
module_names=module_names
)
else:
name = file_name.split(".js")[0]
module_id = ".".join(hierarchy + [name])
module_name = name
return {
"id": module_id,
"name": module_name,
"path": module_id.replace(".", "/"),
"file_id": file_id
}
def _guess_module_name(name, hierarchy_folders, module_names):
"""Return the full module *name* from *hierarchy_folders*.
*module_names* is the list of modules already fetched.
"""
for i in range(len(hierarchy_folders)):
root_module = ".".join(hierarchy_folders[i:])
if root_module in module_names:
return ".".join([root_module, name])
return name
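# Usage sketch for fetch_environment()/_guess_module_name() above; the file
# identifiers are hypothetical examples, derived only from the code in this
# module.
assert fetch_environment("module/test/index.js") == {
    "id": "module.test",
    "name": "test",
    "path": "module/test",
    "file_id": "module/test/index.js",
}
# A non-index file that sits next to an index.js is namespaced under its folder.
assert fetch_environment(
    "module/test/helper.js", files=["index.js", "helper.js"]
)["id"] == "module.test.helper"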
|
{
"content_hash": "edb36ab2c50cdddc16da18ace560d6a5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 28.26086956521739,
"alnum_prop": 0.5712820512820512,
"repo_name": "buddly27/champollion",
"id": "1710383d8869cfdae1165404200a4ef63f775eeb",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/champollion/parser/js_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "332819"
}
],
"symlink_target": ""
}
|
"""
* *******************************************************
* Copyright VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '6.0+'
from com.vmware.content_client import (Library,
LocalLibrary,
SubscribedLibrary)
from com.vmware.content.library_client import Item, SubscribedItem
from com.vmware.content.library.item_client import DownloadSession
from com.vmware.content.library.item_client import UpdateSession
from com.vmware.content.library.item.downloadsession_client import File as DownloadSessionFile
from com.vmware.content.library.item.updatesession_client import File as UpdateSessionFile
from com.vmware.vcenter_client import VM
from com.vmware.vcenter.iso_client import Image
from com.vmware.vcenter.ovf_client import LibraryItem
from com.vmware.vcenter.vm_template_client import LibraryItems as VmtxLibraryItem
class ClsApiClient(object):
"""
This is a simplified wrapper around the Content Library APIs.
It is used to access services exposed by Content Library Service.
"""
def __init__(self, service_manager):
# Client for all the services on a management node.
self.service_manager = service_manager
# Returns the service which provides support for generic functionality
# which can be applied equally to all types of libraries
self.library_service = Library(self.service_manager.stub_config)
# Returns the service for managing local libraries
self.local_library_service = LocalLibrary(self.service_manager.stub_config)
# Returns the service for managing subscribed libraries
self.subscribed_library_service = SubscribedLibrary(self.service_manager.stub_config)
# Returns the service for managing library items
self.library_item_service = Item(self.service_manager.stub_config)
# Returns the service for managing sessions to update or delete content
self.upload_service = UpdateSession(self.service_manager.stub_config)
# Returns the service for managing files within an update session
self.upload_file_service = UpdateSessionFile(self.service_manager.stub_config)
# Returns the service for managing sessions to download content
self.download_service = DownloadSession(self.service_manager.stub_config)
# Returns the service for managing files within a download session
self.download_file_service = DownloadSessionFile(self.service_manager.stub_config)
# Returns the service for deploying virtual machines from OVF library items
self.ovf_lib_item_service = LibraryItem(self.service_manager.stub_config)
# Returns the service for mount and unmount of an iso file on a VM
self.iso_service = Image(self.service_manager.stub_config)
# Returns the service for managing subscribed library items
self.subscribed_item_service = SubscribedItem(self.service_manager.stub_config)
# Returns the service for managing library items containing virtual
# machine templates
self.vmtx_service = VmtxLibraryItem(self.service_manager.stub_config)
# Creates the service that communicates with virtual machines
self.vm_service = VM(self.service_manager.stub_config)
        # TODO: Add the other CLS services, e.g. storage, config, type
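# Minimal usage sketch, not part of this sample. It assumes a service_manager
# with a populated stub_config is already available (e.g. built by the SDK's
# sample helpers) and that the standard list()/get() operations of the
# Library, LocalLibrary and Item services behave as in the vSphere Automation
# SDK.
def list_local_libraries(service_manager):
    """Sketch: print the name of every local library reachable via the client."""
    client = ClsApiClient(service_manager)
    for library_id in client.local_library_service.list():
        library = client.library_service.get(library_id)
        print(library.name)
        # Items of this library could then be enumerated with:
        # item_ids = client.library_item_service.list(library_id)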
|
{
"content_hash": "4440c31bfc0fe4a706297359de8bb880",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 46.90243902439025,
"alnum_prop": 0.7113884555382215,
"repo_name": "pgbidkar/vsphere-automation-sdk-python",
"id": "e5e48b99cd58efbd8123651f78659e75a2f11232",
"size": "3846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/vsphere/contentlibrary/lib/cls_api_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1232"
},
{
"name": "Python",
"bytes": "2656"
}
],
"symlink_target": ""
}
|
import uuid
import testtools
from heat.common import short_id
class ShortIdTest(testtools.TestCase):
def test_byte_string_8(self):
self.assertEqual('\xab', short_id._to_byte_string(0xab, 8))
self.assertEqual('\x05', short_id._to_byte_string(0x05, 8))
def test_byte_string_16(self):
self.assertEqual('\xab\xcd', short_id._to_byte_string(0xabcd, 16))
self.assertEqual('\x0a\xbc', short_id._to_byte_string(0xabc, 16))
def test_byte_string_12(self):
self.assertEqual('\xab\xc0', short_id._to_byte_string(0xabc, 12))
self.assertEqual('\x0a\xb0', short_id._to_byte_string(0x0ab, 12))
def test_byte_string_60(self):
val = 0x111111111111111
byte_string = short_id._to_byte_string(val, 60)
self.assertEqual('\x11\x11\x11\x11\x11\x11\x11\x10', byte_string)
def test_get_id_string(self):
id = short_id.get_id('11111111-1111-4111-bfff-ffffffffffff')
self.assertEqual('ceirceirceir', id)
def test_get_id_uuid_1(self):
source = uuid.UUID('11111111-1111-4111-bfff-ffffffffffff')
self.assertEqual(0x111111111111111, source.time)
self.assertEqual('ceirceirceir', short_id.get_id(source))
def test_get_id_uuid_f(self):
source = uuid.UUID('ffffffff-ffff-4fff-8000-000000000000')
self.assertEqual('777777777777', short_id.get_id(source))
def test_get_id_uuid_0(self):
source = uuid.UUID('00000000-0000-4000-bfff-ffffffffffff')
self.assertEqual('aaaaaaaaaaaa', short_id.get_id(source))
def test_get_id_uuid_endianness(self):
source = uuid.UUID('ffffffff-00ff-4000-aaaa-aaaaaaaaaaaa')
self.assertEqual('aaaa77777777', short_id.get_id(source))
def test_get_id_uuid1(self):
source = uuid.uuid1()
self.assertRaises(ValueError, short_id.get_id, source)
def test_generate_ids(self):
allowed_chars = 'abcdefghijklmnopqrstuvwxyz234567'
ids = [short_id.generate_id() for i in range(25)]
for id in ids:
self.assertEqual(12, len(id))
self.assertFalse(id.translate(None, allowed_chars))
self.assertEqual(1, ids.count(id))
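# Reference sketch (not the heat implementation) reproducing the behaviour the
# tests above pin down for short_id._to_byte_string: the value is left-aligned
# to a whole number of bytes, padding the low-order bits with zeros.
def _to_byte_string_sketch(value, num_bits):
    shift = -num_bits % 8  # bits of zero padding needed to reach a byte boundary
    value <<= shift
    num_bytes = (num_bits + shift) // 8
    return ''.join(chr((value >> (8 * i)) & 0xff)
                   for i in reversed(range(num_bytes)))
assert _to_byte_string_sketch(0xabc, 12) == '\xab\xc0'
assert _to_byte_string_sketch(0x05, 8) == '\x05'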
|
{
"content_hash": "20acb615f31a34389c320f07244ad7eb",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 37.03389830508475,
"alnum_prop": 0.6530892448512586,
"repo_name": "redhat-openstack/heat",
"id": "0cb74704a4fa3346853357c23e47c703e8769eeb",
"size": "2760",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "heat/tests/test_short_id.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
}
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from JeuxView import JeuxView
from UsersView import UsersView
from Session import Session
from ConnexionWidget import ConnexionWidget
from Utilisateur import Utilisateur
from functools import partial
from EmpruntsView import EmpruntsView
from ProfileView import ProfileView
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
        # Define the central area of the window
zoneCentrale = QWidget()
self.session = None
        # Connection :: open the login widget
self.toolbar = None
self.connexion()
        # Connected or not?
        # Window settings
        #self.resize(300,100) # Size
        self.setWindowTitle("Gestion Ludotheque") # Title
        self.setWindowIcon(QIcon('img/game.png')) # Icon
#self.jeux()
def connected(self):
self.session = self.conn.ActiveSession
EST_ADMIN = self.session.est_admin()
USERNAME = self.session.get_session_User().get_username()
self.toolbar.close()
self.toolbar = self.addToolBar('ToolBar')
        # Pin the toolbar so it can no longer be moved.
self.toolbar.setMovable(False)
        # Add items (=> actions)
        # With icons?
self.toolbar.addAction(QIcon('img/icon.png'),'Jeux',self.jeux)
        # Without icons?
#self.toolbar.addAction('Jeux',self.jeux)
        # Show this button only for admins
if EST_ADMIN:
self.toolbar.addSeparator();
            # With icons?
self.toolbar.addAction(QIcon('img/user.png'),'Utilisateurs',self.user)
            # Without icons
#self.toolbar.addAction('Utilisateurs',self.user)
self.toolbar.addSeparator();
self.toolbar.addAction("Emprunts",self.emprunts)
self.toolbar.addSeparator();
        # Spacer so the widgets sit on the right
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.toolbar.addWidget(spacer)
        # Profile & logout
self.toolbar.addSeparator();
self.toolbar.addAction(USERNAME,self.profile)
self.toolbar.addSeparator();
self.toolbar.addAction('Deconnexion',self.logout)
self.jeux()
    # Build the menu toolbar
def logout(self):
self.toolbar.close()
del self.session
self.connexion()
def connexion(self):
if self.toolbar != None:
self.toolbar.close()
self.toolbar = self.addToolBar('ToolBar')
        # Pin the toolbar
self.toolbar.setMovable(False)
self.setBaseSize(400,200)
        # Spacer so the widgets sit on the right
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.toolbar.addWidget(spacer)
self.toolbar.addAction("Afficher les jeux",self.jeux)
self.toolbar.addAction('Quitter',self.close)
self.conn = ConnexionWidget()
self.setCentralWidget(self.conn)
self.adjustSize()
def profile(self):
Profile=ProfileView(self.session)
self.setCentralWidget(Profile)
    def user(self): # USERS WIDGET
widget = QWidget()
        # Create the widget
self.Users = UsersView()
        # Vertical container
VLayout = QVBoxLayout()
        # Element 1 of the vertical container: label
VLayout.addWidget(QLabel("Utilisateurs"))
        # Search layout
RechercheLayout = QHBoxLayout()
self.RechercheText = QLineEdit()
RechercheButton = QPushButton("Rechercher")
self.RechercheText.textEdited.connect(self.rechercheUser)
        # Add the widgets to the search layout
RechercheLayout.addWidget(self.RechercheText)
RechercheLayout.addWidget(RechercheButton)
        # Add the search layout to the main layout
VLayout.addLayout(RechercheLayout)
        # Connection:
VLayout.addWidget(self.Users)
        # Horizontal container for buttons
Buttons = QHBoxLayout()
        # Add a button (2)
AddUser = QPushButton("Ajouter un Utilisateur")
Buttons.addWidget(AddUser)
ReinitAll = QPushButton(u"Ré-initialiser tous abonnements")
Buttons.addWidget(ReinitAll)
        # Add the horizontal container to the main (vertical) container
VLayout.addLayout(Buttons)
        # Assign the vertical layout to the widget
widget.setLayout(VLayout)
        # Swap the central widget!
self.setCentralWidget(widget)
AddUser.clicked.connect(self.Users.AddUser)
ReinitAll.clicked.connect(self.Users.ReinitAll)
def emprunts(self):
Emprunts=EmpruntsView()
self.setCentralWidget(Emprunts)
    def jeux(self): # GAMES WIDGET
if self.session == None:
self.toolbar.close()
self.toolbar = self.addToolBar('ToolBar')
            # Pin the toolbar again.
self.toolbar.setMovable(False)
            # Add items (=> actions)
self.toolbar.addAction(QIcon('img/icon.png'),'Jeux',self.jeux)
            # Spacer so the widgets sit on the right
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.toolbar.addWidget(spacer)
            # Log in
self.toolbar.addAction('Se connecter',self.connexion)
Jeux=JeuxView(session=self.session)
self.setCentralWidget(Jeux)
def rechercheUser(self):
self.Users.searchmydata(str(self.RechercheText.text()))
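# Hypothetical entry point for the window above; it is not part of
# MainWindow.py, just the usual PyQt4 boilerplate for launching it.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())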
|
{
"content_hash": "ac21f2dad07af55a7616588109acdbd4",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 82,
"avg_line_length": 29.723958333333332,
"alnum_prop": 0.6379884352549501,
"repo_name": "Darkyler/Piscine",
"id": "0aec1ea254306ba14499d19bcb000afbdc553f3d",
"size": "5761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MainWindow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161698"
}
],
"symlink_target": ""
}
|
"""Provide tests for mysensors notify platform."""
from __future__ import annotations
from collections.abc import Callable
from unittest.mock import MagicMock, call
from mysensors.sensor import Sensor
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
async def test_text_type(
hass: HomeAssistant,
text_node: Sensor,
transport_write: MagicMock,
integration: MockConfigEntry,
) -> None:
"""Test a text type child."""
# Test without target.
await hass.services.async_call(
NOTIFY_DOMAIN, "mysensors", {"message": "Hello World"}, blocking=True
)
assert transport_write.call_count == 1
assert transport_write.call_args == call("1;1;1;0;47;Hello World\n")
# Test with target.
await hass.services.async_call(
NOTIFY_DOMAIN,
"mysensors",
{"message": "Hello", "target": "Text Node 1 1"},
blocking=True,
)
assert transport_write.call_count == 2
assert transport_write.call_args == call("1;1;1;0;47;Hello\n")
transport_write.reset_mock()
# Test a message longer than 25 characters.
await hass.services.async_call(
NOTIFY_DOMAIN,
"mysensors",
{
"message": "This is a long message that will be split",
"target": "Text Node 1 1",
},
blocking=True,
)
assert transport_write.call_count == 2
assert transport_write.call_args_list == [
call("1;1;1;0;47;This is a long message th\n"),
call("1;1;1;0;47;at will be split\n"),
]
async def test_text_type_discovery(
hass: HomeAssistant,
text_node: Sensor,
transport_write: MagicMock,
receive_message: Callable[[str], None],
) -> None:
"""Test text type discovery."""
receive_message("1;2;0;0;36;\n")
receive_message("1;2;1;0;47;test\n")
receive_message("1;2;1;0;47;test2\n") # Test that more than one set message works.
await hass.async_block_till_done()
# Test targeting the discovered child.
await hass.services.async_call(
NOTIFY_DOMAIN,
"mysensors",
{"message": "Hello", "target": "Text Node 1 2"},
blocking=True,
)
assert transport_write.call_count == 1
assert transport_write.call_args == call("1;2;1;0;47;Hello\n")
transport_write.reset_mock()
# Test targeting all notify children.
await hass.services.async_call(
NOTIFY_DOMAIN, "mysensors", {"message": "Hello World"}, blocking=True
)
assert transport_write.call_count == 2
assert transport_write.call_args_list == [
call("1;1;1;0;47;Hello World\n"),
call("1;2;1;0;47;Hello World\n"),
]
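# Illustrative sketch of the 25-character chunking the long-message test above
# expects; it mirrors the observed behaviour and is not the integration's code.
MAX_PAYLOAD = 25
def split_message(message: str) -> list[str]:
    """Split a notification into MySensors-sized payload chunks."""
    return [message[i : i + MAX_PAYLOAD] for i in range(0, len(message), MAX_PAYLOAD)]
assert split_message("This is a long message that will be split") == [
    "This is a long message th",
    "at will be split",
]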
|
{
"content_hash": "7a08d1dc6b2081f9eaa1fc23e7264067",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 28.894736842105264,
"alnum_prop": 0.6378870673952641,
"repo_name": "toddeye/home-assistant",
"id": "e96b463cc783a48426e12c96fc2b0b09ab104b65",
"size": "2745",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/mysensors/test_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import numpy as np
import theano
import theano.d3viz as d3v
from theano import tensor
from blocks import roles
from blocks.roles import OUTPUT
from blocks.model import Model
from blocks.extensions import saveload
from blocks.filter import VariableFilter
from utils import MainLoop
from config import config
from model import nn_fprop
from utils import pre_process_image, load_encoder, encode_image, decode_image
import argparse
import sys
import os
import pandas as pd
import time
import signal
from pandas.parser import CParserError
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import dtype
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Float32MultiArray
from cv_bridge import CvBridge, CvBridgeError
import cv2
import PIL
import scipy
locals().update(config)
sceneStateFile = os.path.abspath("predictions/sceneState")
def load_models(model_path=save_path, in_size=len(input_columns),
out_size=len(output_columns) - 1 if cost_mode == 'RL-MDN' else len(output_columns),
hidden_size=hidden_size, num_recurrent_layers=num_recurrent_layers, model=layer_models[0]):
initials = []
if not os.path.isfile(model_path):
print 'Could not find model file.'
sys.exit(0)
print 'Loading model from {0}...'.format(model_path)
x = tensor.tensor3('features', dtype=theano.config.floatX)
y = tensor.tensor3('targets', dtype='floatX')
train_flag = [theano.shared(0)]
_, latent_size = load_encoder()
in_size = latent_size + len(input_columns)
y_hat, cost, cells = nn_fprop(x, y, in_size, out_size, hidden_size, num_recurrent_layers, train_flag)
main_loop = MainLoop(algorithm=None, data_stream=None, model=Model(cost),
extensions=[saveload.Load(model_path)])
for extension in main_loop.extensions:
extension.main_loop = main_loop
main_loop._run_extensions('before_training')
bin_model = main_loop.model
print 'Model loaded. Building prediction function...'
hiddens = []
for i in range(num_recurrent_layers):
brick = [b for b in bin_model.get_top_bricks() if b.name == layer_models[i] + str(i)][0]
hiddens.extend(VariableFilter(theano_name=brick.name + '_apply_states')(bin_model.variables))
hiddens.extend(VariableFilter(theano_name=brick.name + '_apply_cells')(cells))
initials.extend(VariableFilter(roles=[roles.INITIAL_STATE])(brick.parameters))
predict_func = theano.function([x], hiddens + [y_hat])
encoder, code_size = load_encoder()
return predict_func, initials, encoder, code_size
def predict_one_timestep(predict_func, encoder, code_size, initials, x, out_size, iteration):
try:
img = CvBridge().imgmsg_to_cv2(camera1_msg, "bgr8")
img = np.array(img, dtype=np.float)
img = img[0:540, 250:840]
cv2.imwrite('predictions/current_image.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
except (CvBridgeError) as e:
print(e)
else:
image = PIL.Image.open('predictions/current_image.jpg')
current_scene_image = pre_process_image(image)
cv2.imshow('Input image', cv2.resize(np.array(current_scene_image.transpose((1, 2, 0)))[...,::-1], (0,0), fx=4, fy=4, interpolation=cv2.INTER_NEAREST ))
cv2.waitKey(10)
current_scene_image = np.array(current_scene_image, dtype=np.float32)
images = np.array([current_scene_image])
_, encoded_images= encode_image(images, encoder)
decoded_images = decode_image(encoded_images, encoder)
cv2.imshow('Reconstructed image', cv2.resize(np.array(decoded_images[0].transpose((1, 2, 0)))[...,::-1], (0,0), fx=4, fy=4, interpolation=cv2.INTER_NEAREST ))
cv2.waitKey(10)
x = np.concatenate([encoded_images[0], x])
newinitials = predict_func([[x]])
raw_prediction = newinitials.pop().astype(theano.config.floatX)
if single_dim_out:
predicted_values = raw_prediction[:, -1, -1].astype(theano.config.floatX).reshape((len(raw_prediction),))
else:
predicted_values = raw_prediction[-1, -1, :].astype(theano.config.floatX)
layer = 0
for initial, newinitial in zip(initials, newinitials):
if iteration % layer_resolutions[layer // 2] == 0:
initial.set_value(newinitial[-1].flatten())
layer += (2 if layer_models[layer // 2] == 'mt_rnn' else 1)
layer = min([layer, len(layer_resolutions)])
return predicted_values, newinitials
def set_task_column_to_one_hot(data):
if config['multi_task_mode'] == 'ID':
for i in config['game_tasks']:
data['task' + str(i)] = 0
data.loc[data['task'] == i, 'task' + str(i)] = 1
return data
def plot_arrays(arrays, title='image'):
images = []
for i in range(int(len(arrays)/8)):
images.append(np.hstack(arrays[i*8:(i+1)*8]))
images = np.vstack(images)
vis = cv2.cvtColor(np.array(images, np.float32), cv2.COLOR_GRAY2BGR)
vis = cv2.resize(vis, (0,0), fx=5, fy=5, interpolation=cv2.INTER_NEAREST )
cv2.imshow(title, vis)
cv2.waitKey(10)
def sample():
if plot_hidden_states:
plt.ion()
plt.ylim([-2, +4])
plt.show()
predict_func, initials, encoder, code_size = load_models()
print("Generating trajectory...")
last_time = 0
counter = 0
out_size = len(output_columns) - 1 if cost_mode == 'RL-MDN' else len(output_columns)
last_speed_calc = time.time()
predicted = np.array([0.749, 0.785, 0.613, 0.459, 0.679, 1., 0.])
last_prediction = predicted.copy()
hidden_states = np.empty((num_recurrent_layers, hidden_size), dtype='float32')
active_hidden_states = np.empty((num_recurrent_layers, hidden_size), dtype='float32')
for iteration in range(10000000):
try:
try:
command_msg = Float32MultiArray()
command_msg.data = predicted[0:out_size]
print predicted
robot_command_pub.publish(command_msg)
except IOError:
print 'could not open the prediction file.'
prediction_diff = ((last_prediction[0:out_size] - predicted[0:out_size]) ** 2).mean()
min_wait = np.clip(0.2 + prediction_diff * 80, .2, .5)
time.sleep(min_wait)
while True:
new_state = pd.DataFrame({'task': [task_to_perform], 'time': [time.time()], 'gripper': [predicted[0]], 'joint1': [predicted[1]],
'joint2': [predicted[2]], 'joint3': [predicted[3]], 'joint4': [predicted[4]], 'joint5': [predicted[5]]})
new_state = set_task_column_to_one_hot(new_state)
print np.array(new_state[input_columns].iloc[0], dtype=theano.config.floatX)
if last_time == new_state['time'][0]:
time.sleep(.005)
continue
else:
break
last_time = new_state['time'][0]
x = np.array(new_state[input_columns].iloc[0], dtype=theano.config.floatX)
predicted, newinitials = predict_one_timestep(predict_func, encoder, code_size, initials, x, out_size, iteration)
last_prediction = predicted.copy()
if plot_hidden_states:
plot_arrays(newinitials)
except(RuntimeError):
print sys.exc_info()[0]
counter += 1
if (time.time() - last_speed_calc > 1):
counter = 0
last_speed_calc = time.time()
if __name__ == '__main__':
if robot == 'al5d' or robot == 'mico':
rospy.init_node('roboinstruct')
def camera1_callback(msg):
global camera1_msg
camera1_msg = msg
def move_callback(msg):
global move_msg
global last_move_msg_time
move_msg = msg
last_move_msg_time = time.time()
image_sub = rospy.Subscriber(camera1_image_topic, Image, camera1_callback)
image_sub = rospy.Subscriber("/move_info_for_test", Float32MultiArray, move_callback)
robot_command_pub = rospy.Publisher("/robot_command", Float32MultiArray, queue_size=100)
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
locals().update(config)
float_formatter = lambda x: "%.5f" % x
np.set_printoptions(formatter={'float_kind': float_formatter})
sample()
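# Standalone sketch of the one-hot expansion performed by
# set_task_column_to_one_hot() above, with made-up task ids; it does not
# depend on the config module.
import pandas as pd
game_tasks = [1, 2]
frame = pd.DataFrame({'task': [1, 2, 2]})
for i in game_tasks:
    frame['task' + str(i)] = 0
    frame.loc[frame['task'] == i, 'task' + str(i)] = 1
# frame now holds the indicator columns task1 and task2:
#    task  task1  task2
# 0     1      1      0
# 1     2      0      1
# 2     2      0      1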
|
{
"content_hash": "03c3f7a2f8f1fb3bd1e9fcc0da1b7b6e",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 162,
"avg_line_length": 43.12820512820513,
"alnum_prop": 0.6329369797859691,
"repo_name": "rrahmati/roboinstruct-2",
"id": "538e697d4593def0f53036dd515e484252dfb777",
"size": "8410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "254666"
},
{
"name": "CMake",
"bytes": "1139"
},
{
"name": "Python",
"bytes": "125844"
}
],
"symlink_target": ""
}
|
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "pyannote-metrics-"
cfg.versionfile_source = "pyannote/metrics/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
{
"content_hash": "fab59faafa1ce043526b0cd5d645aa42",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 79,
"avg_line_length": 34.01777777777778,
"alnum_prop": 0.5717925267833812,
"repo_name": "pyannote/pyannote-metrics",
"id": "b2d4fa9890b77ca5af887d84e08ab2ce973e818f",
"size": "15783",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyannote/metrics/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "48601"
},
{
"name": "Python",
"bytes": "265388"
}
],
"symlink_target": ""
}
|
import sys
import signal
import subprocess
import logging
from time import sleep
from threading import Thread
from datetime import datetime
from termcolor import colored, cprint
from errors import DeviceError, KernelError
class Filter():
LAN_INTERFACE = "eth0"
WAN_INTERFACE = "eth1"
DEF_HTB_RATE = "1000Mbit" #Rate of the def bucket
USER_UP_RATE = "1050kbit"
USER_UP_CEIL_RATE = "1200kbit"
USER_DOWN_RATE = "4100Kbit"
USER_DOWN_CEIL_RATE = "4400Kbit"
wan_ip_prefs = set()
lan_ip_prefs = set()
def console(self, exe):
#cprint("\t"+exe, 'cyan')
logging.debug("\tIN: "+str(exe))
proc = subprocess.Popen("nice -n10 "+exe, shell=True,\
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()
#cprint("\tOut: "+str(len(output))+":"+str(output), 'white')
logging.debug(str(output))
for x in output:
if 'We have an error talking to the kernel' in x:
raise KernelError(datetime.now())
#if 'Device or resource busy' in x:
# raise DeviceError(exe)
def destroy_tc_rules(self):
cprint("Destroy TC Rules", 'red')
logging.info('Destroy TC Rules')
#Delete rules
self.console('tc qdisc del dev '+self.WAN_INTERFACE+' root')
self.console('tc qdisc del dev '+self.LAN_INTERFACE+' root')
def init_tc_rules(self):
cprint("Init TC Rules", 'red')
logging.info('Init TC Rules')
#Delete previous rules
self.console('tc qdisc del dev '+self.WAN_INTERFACE+' root')
self.console('tc qdisc del dev '+self.LAN_INTERFACE+' root')
self.console('tc qdisc add dev '+self.WAN_INTERFACE+\
' root handle 1:0 htb default FFFF')
self.console('tc class add dev '+self.WAN_INTERFACE+\
' parent 1:0 classid 1:FFFF htb rate '+self.DEF_HTB_RATE+\
' ceil '+self.DEF_HTB_RATE+' prio 0')
self.console('tc qdisc add dev '+self.LAN_INTERFACE+\
' root handle 1:0 htb default FFFF')
self.console('tc class add dev '+self.LAN_INTERFACE+\
' parent 1:0 classid 1:FFFF htb rate '+self.DEF_HTB_RATE+\
' ceil '+self.DEF_HTB_RATE+' prio 0')
    # Create new TC classes (download/upload buckets) for a device
def tc_add_device(self, obj):
cprint("Add Device: "+obj["mac"], 'yellow')
logging.info("Add Device: "+obj["mac"])
self.console('tc class add dev '+self.LAN_INTERFACE+\
' parent 1:0 classid 1:'+str(obj['token'])+\
' htb rate '+self.USER_DOWN_RATE+' ceil '+\
self.USER_DOWN_CEIL_RATE+' prio 1')
self.console('tc class add dev '+self.WAN_INTERFACE+\
' parent 1:0 classid 1:'+str(obj['token'])+\
' htb rate '+self.USER_UP_RATE+' ceil '+\
self.USER_UP_CEIL_RATE+' prio 1')
def tc_add_filter(self, token, ip, obj):
#IPv6
if ':' in ip:
self.tc_add_filter_IPv6(token, ip, obj)
#IPv4
elif '.' in ip:
self.tc_add_filter_IPv4(token, ip, obj)
def tc_add_filter_IPv4(self, token, ip, obj):
cprint("\t Add IPv4 Filter: "+ip+" for "+obj["mac"], 'yellow')
logging.info("\t Add IPv4 Filter: "+ip+" for "+obj["mac"])
self.console('tc filter add dev '+self.LAN_INTERFACE+\
' protocol ip parent 1:0 prio 0 u32 match ip dst '+ip+\
' flowid 1:'+str(token))
pref = self.tc_get_new_filter_pref(self.LAN_INTERFACE)
obj["prefs"]["lan"][ip] = pref
self.lan_ip_prefs |= pref
self.console('tc filter add dev '+self.WAN_INTERFACE+\
' protocol ip parent 1:0 prio 0 u32 match ip src '+ip+\
' flowid 1:'+str(token))
pref = self.tc_get_new_filter_pref(self.WAN_INTERFACE)
obj["prefs"]["wan"][ip] = pref
self.wan_ip_prefs |= pref
def tc_add_filter_IPv6(self, token, ip, obj):
cprint("\t Add IPv6 Filter: "+ip+" for "+obj["mac"], 'yellow')
logging.info("\t Add IPv6 Filter: "+ip+" for "+obj["mac"])
self.console('tc filter add dev '+self.LAN_INTERFACE+\
' protocol ipv6 parent 1:0 prio 0 u32 match ip6 dst '+ip+\
' flowid 1:'+str(token))
pref = self.tc_get_new_filter_pref(self.LAN_INTERFACE)
obj["prefs"]["lan"][ip] = pref
self.lan_ip_prefs |= pref
logging.debug("\t new Filter LAN Pref: "+ str(pref))
self.console('tc filter add dev '+self.WAN_INTERFACE+\
' protocol ipv6 parent 1:0 prio 0 u32 match ip6 src '+ip+\
' flowid 1:'+str(token))
pref = self.tc_get_new_filter_pref(self.WAN_INTERFACE)
obj["prefs"]["wan"][ip] = pref
self.wan_ip_prefs |= pref
logging.debug("\t new Filter WAN Pref: "+ str(pref))
def tc_del_filter(self, token, ip, obj):
cprint("\t Del Filter: "+ip+" for "+obj["mac"], 'yellow')
logging.info("\t Del Filter: "+ip+" for "+obj["mac"])
#Delete LAN filters
for pref in obj["prefs"]["lan"][ip]:
self.console('tc filter del dev '+self.LAN_INTERFACE+' pref '+pref)
logging.debug("\t delete Filter LAN Pref: "+ str(pref))
self.lan_ip_prefs = self.lan_ip_prefs - obj["prefs"]["lan"][ip]
del obj["prefs"]["lan"][ip]
#Delete WAN filters
for pref in obj["prefs"]["wan"][ip]:
self.console('tc filter del dev '+self.WAN_INTERFACE+' pref '+pref)
logging.debug("\t delete Filter WAN Pref: "+ str(pref))
self.wan_ip_prefs = self.wan_ip_prefs - obj["prefs"]["wan"][ip]
del obj["prefs"]["wan"][ip]
    # Return the set of prefs (ids) of the newly added filters on the interface
def tc_get_new_filter_pref(self, interface):
prefs = set()
proc = subprocess.Popen('tc -p filter list dev '+interface+\
' parent 1:0', shell=True,\
stdout=subprocess.PIPE)
output = proc.communicate()
lines = output[0].split('\n')
for line in lines:
if 'pref' in line:
words = line.split()
pref = words[words.index('pref')+1]
prefs = prefs | set([pref])
if interface == self.WAN_INTERFACE:
return prefs - self.wan_ip_prefs
elif interface == self.LAN_INTERFACE:
return prefs - self.lan_ip_prefs
def tc_del_class(self, token, obj):
cprint("Delete Client: "+obj['mac'], 'red')
self.console('tc class del dev '+self.LAN_INTERFACE+\
' parent 1:0 classid 1:'+str(token))
self.console('tc class del dev '+self.WAN_INTERFACE+\
' parent 1:0 classid 1:'+str(token))
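# Illustration only (requires root and the tc binary to actually apply): for a
# hypothetical device entry {"mac": "aa:bb:cc:dd:ee:ff", "token": "1A"},
# tc_add_device() above passes these commands to console(), which prefixes
# them with "nice -n10":
#   tc class add dev eth0 parent 1:0 classid 1:1A htb rate 4100Kbit ceil 4400Kbit prio 1
#   tc class add dev eth1 parent 1:0 classid 1:1A htb rate 1050kbit ceil 1200kbit prio 1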
|
{
"content_hash": "53a4db57f8d85089bdcdf4b24288fe73",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 79,
"avg_line_length": 39.11797752808989,
"alnum_prop": 0.5461726267413471,
"repo_name": "ABalanuta/TrafficShapingRouter",
"id": "aed4200862abad591756fab32d4181aab09df829",
"size": "6963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16711"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_addparentchild'),
]
operations = [
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=100, unique=True),
),
]
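# For reference, the field state this migration moves the core.Category model
# to (inferred from the AlterField operation above; the real model may define
# additional fields):
#     name = models.CharField(max_length=100, unique=True)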
|
{
"content_hash": "7a1eea90a0b2a56745f229efa71f073e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 21.5,
"alnum_prop": 0.5755813953488372,
"repo_name": "bmihelac/django-import-export",
"id": "dc9c57681ee106e647d09d0cbce46a1d6f7b1bf0",
"size": "344",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/core/migrations/0006_auto_20171130_0147.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "8297"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "184539"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
from nameko.rpc import rpc
class GreetingService:
name = "greeting_service"
@rpc
def hello(self, name):
return "Hello, {}!".format(name)
|
{
"content_hash": "0b3321f9f05f2c18a3ceea5b92981aac",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 40,
"avg_line_length": 17.77777777777778,
"alnum_prop": 0.63125,
"repo_name": "adamcharnock/lightbus",
"id": "76bf41f23aaf5578582a41b24556677cb5a60a09",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightbus_experiments/nameko/helloworld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "710699"
}
],
"symlink_target": ""
}
|