from tests.test_helper import *
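# Integration tests for Transaction: sale/credit lifecycle, vault storage,
# settlement, refunds, and the transparent-redirect flow. They run against
# the Braintree sandbox configured by tests.test_helper, using the sandbox
# test card numbers (4111111111111111, 5105105105105100) and the special
# TransactionAmounts values that trigger an authorization or a decline.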
class TestTransaction(unittest.TestCase):
def test_sale_returns_a_successful_result_with_type_of_sale(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\w{6}\Z", transaction.id))
self.assertEquals(Transaction.Type.Sale, transaction.type)
self.assertEquals(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertEquals("411111", transaction.credit_card_details.bin)
self.assertEquals("1111", transaction.credit_card_details.last_4)
self.assertEquals("05/2009", transaction.credit_card_details.expiration_date)
def test_sale_allows_amount_as_a_decimal(self):
result = Transaction.sale({
"amount": Decimal(TransactionAmounts.Authorize),
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\w{6}\Z", transaction.id))
self.assertEquals(Transaction.Type.Sale, transaction.type)
self.assertEquals(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertEquals("411111", transaction.credit_card_details.bin)
self.assertEquals("1111", transaction.credit_card_details.last_4)
self.assertEquals("05/2009", transaction.credit_card_details.expiration_date)
def test_sale_with_expiration_month_and_year_separately(self):
result = Transaction.sale({
"amount": Decimal(TransactionAmounts.Authorize),
"credit_card": {
"number": "4111111111111111",
"expiration_month": "05",
"expiration_year": "2012"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.Type.Sale, transaction.type)
self.assertEquals("05", transaction.credit_card_details.expiration_month)
self.assertEquals("2012", transaction.credit_card_details.expiration_year)
def test_sale_works_with_all_attributes(self):
result = Transaction.sale({
"amount": "100.00",
"order_id": "123",
"credit_card": {
"cardholder_name": "The Cardholder",
"number": "5105105105105100",
"expiration_date": "05/2011",
"cvv": "123"
},
"customer": {
"first_name": "Dan",
"last_name": "Smith",
"company": "Braintree Payment Solutions",
"email": "dan@example.com",
"phone": "419-555-1234",
"fax": "419-555-1235",
"website": "http://braintreepaymentsolutions.com"
},
"billing": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America",
"country_code_alpha2": "US",
"country_code_alpha3": "USA",
"country_code_numeric": "840"
},
"shipping": {
"first_name": "Andrew",
"last_name": "Mason",
"company": "Braintree",
"street_address": "456 W Main St",
"extended_address": "Apt 2F",
"locality": "Bartlett",
"region": "IL",
"postal_code": "60103",
"country_name": "Mexico",
"country_code_alpha2": "MX",
"country_code_alpha3": "MEX",
"country_code_numeric": "484"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\w{6}\Z", transaction.id))
self.assertEquals(Transaction.Type.Sale, transaction.type)
self.assertEquals(Transaction.Status.Authorized, transaction.status)
self.assertEquals(Decimal("100.00"), transaction.amount)
self.assertEquals("123", transaction.order_id)
self.assertEquals("1000", transaction.processor_response_code)
self.assertEquals(datetime, type(transaction.created_at))
self.assertEquals(datetime, type(transaction.updated_at))
self.assertEquals("510510", transaction.credit_card_details.bin)
self.assertEquals("5100", transaction.credit_card_details.last_4)
self.assertEquals("510510******5100", transaction.credit_card_details.masked_number)
self.assertEquals("MasterCard", transaction.credit_card_details.card_type)
self.assertEquals("The Cardholder", transaction.credit_card_details.cardholder_name)
self.assertEquals(None, transaction.avs_error_response_code)
self.assertEquals("M", transaction.avs_postal_code_response_code)
self.assertEquals("M", transaction.avs_street_address_response_code)
self.assertEquals("Dan", transaction.customer_details.first_name)
self.assertEquals("Smith", transaction.customer_details.last_name)
self.assertEquals("Braintree Payment Solutions", transaction.customer_details.company)
self.assertEquals("dan@example.com", transaction.customer_details.email)
self.assertEquals("419-555-1234", transaction.customer_details.phone)
self.assertEquals("419-555-1235", transaction.customer_details.fax)
self.assertEquals("http://braintreepaymentsolutions.com", transaction.customer_details.website)
self.assertEquals("Carl", transaction.billing_details.first_name)
self.assertEquals("Jones", transaction.billing_details.last_name)
self.assertEquals("Braintree", transaction.billing_details.company)
self.assertEquals("123 E Main St", transaction.billing_details.street_address)
self.assertEquals("Suite 403", transaction.billing_details.extended_address)
self.assertEquals("Chicago", transaction.billing_details.locality)
self.assertEquals("IL", transaction.billing_details.region)
self.assertEquals("60622", transaction.billing_details.postal_code)
self.assertEquals("United States of America", transaction.billing_details.country_name)
self.assertEquals("US", transaction.billing_details.country_code_alpha2)
self.assertEquals("USA", transaction.billing_details.country_code_alpha3)
self.assertEquals("840", transaction.billing_details.country_code_numeric)
self.assertEquals("Andrew", transaction.shipping_details.first_name)
self.assertEquals("Mason", transaction.shipping_details.last_name)
self.assertEquals("Braintree", transaction.shipping_details.company)
self.assertEquals("456 W Main St", transaction.shipping_details.street_address)
self.assertEquals("Apt 2F", transaction.shipping_details.extended_address)
self.assertEquals("Bartlett", transaction.shipping_details.locality)
self.assertEquals("IL", transaction.shipping_details.region)
self.assertEquals("60103", transaction.shipping_details.postal_code)
self.assertEquals("Mexico", transaction.shipping_details.country_name)
self.assertEquals("MX", transaction.shipping_details.country_code_alpha2)
self.assertEquals("MEX", transaction.shipping_details.country_code_alpha3)
self.assertEquals("484", transaction.shipping_details.country_code_numeric)
def test_sale_with_vault_customer_and_credit_card_data(self):
customer = Customer.create({
"first_name": "Pingu",
"last_name": "Penguin",
}).customer
result = Transaction.sale({
"amount": Decimal(TransactionAmounts.Authorize),
"customer_id": customer.id,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(transaction.credit_card_details.masked_number, "411111******1111")
self.assertEquals(None, transaction.vault_credit_card)
def test_sale_with_vault_customer_and_credit_card_data_and_store_in_vault(self):
customer = Customer.create({
"first_name": "Pingu",
"last_name": "Penguin",
}).customer
result = Transaction.sale({
"amount": Decimal(TransactionAmounts.Authorize),
"customer_id": customer.id,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"options": {
"store_in_vault": True
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals("411111******1111", transaction.credit_card_details.masked_number)
self.assertEquals("411111******1111", transaction.vault_credit_card.masked_number)
def test_sale_with_custom_fields(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"custom_fields": {
"store_me": "some extra stuff"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals("some extra stuff", transaction.custom_fields["store_me"])
def test_sale_with_merchant_account_id(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"merchant_account_id": TestHelper.non_default_merchant_account_id,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(TestHelper.non_default_merchant_account_id, transaction.merchant_account_id)
def test_sale_without_merchant_account_id_falls_back_to_default(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(TestHelper.default_merchant_account_id, transaction.merchant_account_id)
def test_sale_with_shipping_address_id(self):
result = Customer.create({
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010"
}
})
self.assertTrue(result.is_success)
customer = result.customer
result = Address.create({
"customer_id": customer.id,
"street_address": "123 Fake St."
})
self.assertTrue(result.is_success)
address = result.address
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"customer_id": customer.id,
"shipping_address_id": address.id,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals("123 Fake St.", transaction.shipping_details.street_address)
self.assertEquals(address.id, transaction.shipping_details.id)
def test_sale_with_level_2(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"purchase_order_number": "12345",
"tax_amount": Decimal("10.00"),
"tax_exempt": True,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals("12345", transaction.purchase_order_number)
self.assertEquals(Decimal("10.00"), transaction.tax_amount)
self.assertEquals(True, transaction.tax_exempt)
def test_create_with_failing_level_2_validations(self):
result = Transaction.sale({
"amount": Decimal("100"),
"tax_amount": "asdf",
"purchase_order_number": "aaaaaaaaaaaaaaaaaa",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.TaxAmountFormatIsInvalid,
result.errors.for_object("transaction").on("tax_amount")[0].code
)
self.assertEquals(
ErrorCodes.Transaction.PurchaseOrderNumberIsTooLong,
result.errors.for_object("transaction").on("purchase_order_number")[0].code
)
def test_sale_with_processor_declined(self):
result = Transaction.sale({
"amount": TransactionAmounts.Decline,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertFalse(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.Status.ProcessorDeclined, transaction.status)
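# The gateway-rejection tests below temporarily switch Configuration to a
# merchant whose processing rules reject on AVS/CVV mismatch, then restore
# the original credentials in a finally block.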
def test_sale_with_gateway_rejected_with_avs(self):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
try:
Configuration.merchant_id = "processing_rules_merchant_id"
Configuration.public_key = "processing_rules_public_key"
Configuration.private_key = "processing_rules_private_key"
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"billing": {
"street_address": "200 Fake Street"
},
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertFalse(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.GatewayRejectionReason.Avs, transaction.gateway_rejection_reason)
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
def test_sale_with_gateway_rejected_with_avs_and_cvv(self):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
try:
Configuration.merchant_id = "processing_rules_merchant_id"
Configuration.public_key = "processing_rules_public_key"
Configuration.private_key = "processing_rules_private_key"
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"billing": {
"postal_code": "20000"
},
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "200"
}
})
self.assertFalse(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.GatewayRejectionReason.AvsAndCvv, transaction.gateway_rejection_reason)
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
def test_sale_with_gateway_rejected_with_cvv(self):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
try:
Configuration.merchant_id = "processing_rules_merchant_id"
Configuration.public_key = "processing_rules_public_key"
Configuration.private_key = "processing_rules_private_key"
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "200"
}
})
self.assertFalse(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.GatewayRejectionReason.Cvv, transaction.gateway_rejection_reason)
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
def test_validation_error_on_invalid_custom_fields(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"custom_fields": {
"invalid_key": "some extra stuff"
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.CustomFieldIsInvalid,
result.errors.for_object("transaction").on("custom_fields")[0].code
)
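# Vault tests: passing options.store_in_vault persists the customer and
# payment method during the sale, which the response then exposes as
# transaction.vault_customer and transaction.vault_credit_card.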
def test_create_can_stuff_customer_and_credit_card_in_the_vault(self):
result = Transaction.sale({
"amount": "100",
"customer": {
"first_name": "Adam",
"last_name": "Williams"
},
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"options": {
"store_in_vault": True
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", transaction.customer_details.id))
self.assertEquals(transaction.customer_details.id, transaction.vault_customer.id)
self.assertNotEqual(None, re.search(r"\A\w{4,5}\Z", transaction.credit_card_details.token))
self.assertEquals(transaction.credit_card_details.token, transaction.vault_credit_card.token)
def test_create_associated_a_billing_address_with_credit_card_in_vault(self):
result = Transaction.sale({
"amount": "100",
"customer": {
"first_name": "Adam",
"last_name": "Williams"
},
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2012"
},
"billing": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America"
},
"options": {
"store_in_vault": True,
"add_billing_address_to_payment_method": True,
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", transaction.customer_details.id))
self.assertEquals(transaction.customer_details.id, transaction.vault_customer.id)
credit_card = CreditCard.find(transaction.vault_credit_card.token)
self.assertEquals(credit_card.billing_address.id, transaction.billing_details.id)
self.assertEquals(credit_card.billing_address.id, transaction.vault_billing_address.id)
self.assertEquals("Carl", credit_card.billing_address.first_name)
self.assertEquals("Jones", credit_card.billing_address.last_name)
self.assertEquals("Braintree", credit_card.billing_address.company)
self.assertEquals("123 E Main St", credit_card.billing_address.street_address)
self.assertEquals("Suite 403", credit_card.billing_address.extended_address)
self.assertEquals("Chicago", credit_card.billing_address.locality)
self.assertEquals("IL", credit_card.billing_address.region)
self.assertEquals("60622", credit_card.billing_address.postal_code)
self.assertEquals("United States of America", credit_card.billing_address.country_name)
def test_create_and_store_the_shipping_address_in_the_vault(self):
result = Transaction.sale({
"amount": "100",
"customer": {
"first_name": "Adam",
"last_name": "Williams"
},
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2012"
},
"shipping": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America"
},
"options": {
"store_in_vault": True,
"store_shipping_address_in_vault": True,
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", transaction.customer_details.id))
self.assertEquals(transaction.customer_details.id, transaction.vault_customer.id)
shipping_address = transaction.vault_customer.addresses[0]
self.assertEquals("Carl", shipping_address.first_name)
self.assertEquals("Jones", shipping_address.last_name)
self.assertEquals("Braintree", shipping_address.company)
self.assertEquals("123 E Main St", shipping_address.street_address)
self.assertEquals("Suite 403", shipping_address.extended_address)
self.assertEquals("Chicago", shipping_address.locality)
self.assertEquals("IL", shipping_address.region)
self.assertEquals("60622", shipping_address.postal_code)
self.assertEquals("United States of America", shipping_address.country_name)
def test_create_submits_for_settlement_if_given_submit_for_settlement_option(self):
result = Transaction.sale({
"amount": "100",
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2012"
},
"options": {
"submit_for_settlement": True
}
})
self.assertTrue(result.is_success)
self.assertEquals(Transaction.Status.SubmittedForSettlement, result.transaction.status)
def test_create_does_not_submit_for_settlement_if_submit_for_settlement_is_false(self):
result = Transaction.sale({
"amount": "100",
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2012"
},
"options": {
"submit_for_settlement": False
}
})
self.assertTrue(result.is_success)
self.assertEquals(Transaction.Status.Authorized, result.transaction.status)
def test_create_can_specify_the_customer_id_and_payment_method_token(self):
customer_id = "customer_" + str(random.randint(1, 1000000))
payment_method_token = "credit_card_" + str(random.randint(1, 1000000))
result = Transaction.sale({
"amount": "100",
"customer": {
"id": customer_id,
"first_name": "Adam",
"last_name": "Williams"
},
"credit_card": {
"token": payment_method_token,
"number": "5105105105105100",
"expiration_date": "05/2012"
},
"options": {
"store_in_vault": True
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(customer_id, transaction.customer_details.id)
self.assertEquals(customer_id, transaction.vault_customer.id)
self.assertEquals(payment_method_token, transaction.credit_card_details.token)
self.assertEquals(payment_method_token, transaction.vault_credit_card.token)
def test_create_using_customer_id(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
})
self.assertTrue(result.is_success)
customer = result.customer
credit_card = customer.credit_cards[0]
result = Transaction.sale({
"amount": "100",
"customer_id": customer.id
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(customer.id, transaction.customer_details.id)
self.assertEquals(customer.id, transaction.vault_customer.id)
self.assertEquals(credit_card.token, transaction.credit_card_details.token)
self.assertEquals(credit_card.token, transaction.vault_credit_card.token)
def test_create_using_payment_method_token(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
})
self.assertTrue(result.is_success)
customer = result.customer
credit_card = customer.credit_cards[0]
result = Transaction.sale({
"amount": "100",
"payment_method_token": credit_card.token
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(customer.id, transaction.customer_details.id)
self.assertEquals(customer.id, transaction.vault_customer.id)
self.assertEquals(credit_card.token, transaction.credit_card_details.token)
self.assertEquals(credit_card.token, transaction.vault_credit_card.token)
def test_create_with_failing_validations(self):
params = {
"transaction": {
"amount": None,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}
}
result = Transaction.sale(params["transaction"])
params["transaction"]["credit_card"].pop("number")
self.assertFalse(result.is_success)
self.assertEquals(params, result.params)
self.assertEquals(
ErrorCodes.Transaction.AmountIsRequired,
result.errors.for_object("transaction").on("amount")[0].code
)
def test_credit_with_a_successful_result(self):
result = Transaction.credit({
"amount": Decimal(TransactionAmounts.Authorize),
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertNotEqual(None, re.search(r"\A\w{6}\Z", transaction.id))
self.assertEquals(Transaction.Type.Credit, transaction.type)
self.assertEquals(Decimal(TransactionAmounts.Authorize), transaction.amount)
cc_details = transaction.credit_card_details
self.assertEquals("411111", cc_details.bin)
self.assertEquals("1111", cc_details.last_4)
self.assertEquals("05/2009", cc_details.expiration_date)
def test_credit_with_unsuccessful_result(self):
result = Transaction.credit({
"amount": None,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
params = {
"transaction": {
"type": Transaction.Type.Credit,
"amount": None,
"credit_card": {
"expiration_date": "05/2009"
}
}
}
self.assertFalse(result.is_success)
self.assertEquals(params, result.params)
self.assertEquals(
ErrorCodes.Transaction.AmountIsRequired,
result.errors.for_object("transaction").on("amount")[0].code
)
def test_credit_with_merchant_account_id(self):
result = Transaction.credit({
"amount": TransactionAmounts.Authorize,
"merchant_account_id": TestHelper.non_default_merchant_account_id,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(TestHelper.non_default_merchant_account_id, transaction.merchant_account_id)
def test_credit_without_merchant_account_id_falls_back_to_default(self):
result = Transaction.credit({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(TestHelper.default_merchant_account_id, transaction.merchant_account_id)
def test_find_returns_a_found_transaction(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
found_transaction = Transaction.find(transaction.id)
self.assertEquals(transaction.id, found_transaction.id)
def test_find_for_bad_transaction_raises_not_found_error(self):
try:
Transaction.find("notreal")
self.fail()
except NotFoundError, e:
self.assertEquals("transaction with id notreal not found", str(e))
def test_void_with_successful_result(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
result = Transaction.void(transaction.id)
self.assertTrue(result.is_success)
self.assertEquals(transaction.id, result.transaction.id)
self.assertEquals(Transaction.Status.Voided, result.transaction.status)
def test_void_with_unsuccessful_result(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Decline,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
result = Transaction.void(transaction.id)
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.CannotBeVoided,
result.errors.for_object("transaction").on("base")[0].code
)
def test_create_with_successful_result(self):
result = Transaction.create({
"type": Transaction.Type.Sale,
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(Transaction.Type.Sale, transaction.type)
def test_create_with_error_result(self):
result = Transaction.create({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"billing": {
"country_code_alpha2": "ZZ",
"country_code_alpha3": "ZZZ",
"country_code_numeric": "000",
"country_name": "zzzzzz"
}
})
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Transaction.TypeIsRequired, result.errors.for_object("transaction").on("type")[0].code)
self.assertEquals(
ErrorCodes.Address.CountryCodeAlpha2IsNotAccepted,
result.errors.for_object("transaction").for_object("billing").on("country_code_alpha2")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryCodeAlpha3IsNotAccepted,
result.errors.for_object("transaction").for_object("billing").on("country_code_alpha3")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryCodeNumericIsNotAccepted,
result.errors.for_object("transaction").for_object("billing").on("country_code_numeric")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryNameIsNotAccepted,
result.errors.for_object("transaction").for_object("billing").on("country_name")[0].code
)
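# Transparent-redirect tests: tr_data is generated server-side, the browser
# form post is simulated with TestHelper.simulate_tr_form_post, and the
# returned query string is confirmed via Transaction.confirm_transparent_redirect.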
def test_sale_from_transparent_redirect_with_successful_result(self):
tr_data = {
"transaction": {
"amount": TransactionAmounts.Authorize,
}
}
post_params = {
"tr_data": Transaction.tr_data_for_sale(tr_data, "http://example.com/path"),
"transaction[credit_card][number]": "4111111111111111",
"transaction[credit_card][expiration_date]": "05/2010",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Transaction.transparent_redirect_create_url())
result = Transaction.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertEquals(Transaction.Type.Sale, transaction.type)
self.assertEquals("411111", transaction.credit_card_details.bin)
self.assertEquals("1111", transaction.credit_card_details.last_4)
self.assertEquals("05/2010", transaction.credit_card_details.expiration_date)
def test_sale_from_transparent_redirect_with_error_result(self):
tr_data = {
"transaction": {
"amount": TransactionAmounts.Authorize,
}
}
post_params = {
"tr_data": Transaction.tr_data_for_sale(tr_data, "http://example.com/path"),
"transaction[credit_card][number]": "booya",
"transaction[credit_card][expiration_date]": "05/2010",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Transaction.transparent_redirect_create_url())
result = Transaction.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertTrue(len(result.errors.for_object("transaction").for_object("credit_card").on("number")) > 0)
def test_sale_from_transparent_redirect_with_403_and_message(self):
tr_data = {
"transaction": {
"amount": TransactionAmounts.Authorize
}
}
post_params = {
"tr_data": Transaction.tr_data_for_sale(tr_data, "http://example.com/path"),
"transaction[credit_card][number]": "booya",
"transaction[credit_card][expiration_date]": "05/2010",
"transaction[bad]": "value"
}
query_string = TestHelper.simulate_tr_form_post(post_params, Transaction.transparent_redirect_create_url())
try:
result = Transaction.confirm_transparent_redirect(query_string)
self.fail()
except AuthorizationError, e:
self.assertEquals("Invalid params: transaction[bad]", e.message)
def test_credit_from_transparent_redirect_with_successful_result(self):
tr_data = {
"transaction": {
"amount": TransactionAmounts.Authorize,
}
}
post_params = {
"tr_data": Transaction.tr_data_for_credit(tr_data, "http://example.com/path"),
"transaction[credit_card][number]": "4111111111111111",
"transaction[credit_card][expiration_date]": "05/2010",
"transaction[billing][country_code_alpha2]": "US",
"transaction[billing][country_code_alpha3]": "USA",
"transaction[billing][country_code_numeric]": "840",
"transaction[billing][country_name]": "United States of America"
}
query_string = TestHelper.simulate_tr_form_post(post_params, Transaction.transparent_redirect_create_url())
result = Transaction.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertEquals(Transaction.Type.Credit, transaction.type)
self.assertEquals("411111", transaction.credit_card_details.bin)
self.assertEquals("1111", transaction.credit_card_details.last_4)
self.assertEquals("05/2010", transaction.credit_card_details.expiration_date)
self.assertEquals("US", transaction.billing_details.country_code_alpha2)
self.assertEquals("USA", transaction.billing_details.country_code_alpha3)
self.assertEquals("840", transaction.billing_details.country_code_numeric)
self.assertEquals("United States of America", transaction.billing_details.country_name)
def test_credit_from_transparent_redirect_with_error_result(self):
tr_data = {
"transaction": {
"amount": TransactionAmounts.Authorize,
}
}
post_params = {
"tr_data": Transaction.tr_data_for_credit(tr_data, "http://example.com/path"),
"transaction[credit_card][number]": "booya",
"transaction[credit_card][expiration_date]": "05/2010",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Transaction.transparent_redirect_create_url())
result = Transaction.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertTrue(len(result.errors.for_object("transaction").for_object("credit_card").on("number")) > 0)
def test_submit_for_settlement_without_amount(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
submitted_transaction = Transaction.submit_for_settlement(transaction.id).transaction
self.assertEquals(Transaction.Status.SubmittedForSettlement, submitted_transaction.status)
self.assertEquals(Decimal(TransactionAmounts.Authorize), submitted_transaction.amount)
def test_submit_for_settlement_with_amount(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
submitted_transaction = Transaction.submit_for_settlement(transaction.id, Decimal("900")).transaction
self.assertEquals(Transaction.Status.SubmittedForSettlement, submitted_transaction.status)
self.assertEquals(Decimal("900.00"), submitted_transaction.amount)
def test_submit_for_settlement_with_validation_error(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
result = Transaction.submit_for_settlement(transaction.id, Decimal("1200"))
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.SettlementAmountIsTooLarge,
result.errors.for_object("transaction").on("amount")[0].code
)
def test_status_history(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
}
}).transaction
submitted_transaction = Transaction.submit_for_settlement(transaction.id).transaction
self.assertEquals(2, len(submitted_transaction.status_history))
self.assertEquals(Transaction.Status.Authorized, submitted_transaction.status_history[0].status)
self.assertEquals(Decimal(TransactionAmounts.Authorize), submitted_transaction.status_history[0].amount)
self.assertEquals(Transaction.Status.SubmittedForSettlement, submitted_transaction.status_history[1].status)
self.assertEquals(Decimal(TransactionAmounts.Authorize), submitted_transaction.status_history[1].amount)
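# Refund tests first settle the sale (see __create_transaction_to_refund,
# which uses TestHelper.settle_transaction); a refund is a credit linked
# back through refunded_transaction_id and the original's refund_ids.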
def test_successful_refund(self):
transaction = self.__create_transaction_to_refund()
result = Transaction.refund(transaction.id)
self.assertTrue(result.is_success)
refund = result.transaction
self.assertEquals(Transaction.Type.Credit, refund.type)
self.assertEquals(Decimal(TransactionAmounts.Authorize), refund.amount)
self.assertEquals(transaction.id, refund.refunded_transaction_id)
self.assertEquals(refund.id, Transaction.find(transaction.id).refund_id)
def test_successful_partial_refund(self):
transaction = self.__create_transaction_to_refund()
result = Transaction.refund(transaction.id, Decimal("500.00"))
self.assertTrue(result.is_success)
self.assertEquals(Transaction.Type.Credit, result.transaction.type)
self.assertEquals(Decimal("500.00"), result.transaction.amount)
def test_multiple_successful_partial_refunds(self):
transaction = self.__create_transaction_to_refund()
refund1 = Transaction.refund(transaction.id, Decimal("500.00")).transaction
self.assertEquals(Transaction.Type.Credit, refund1.type)
self.assertEquals(Decimal("500.00"), refund1.amount)
refund2 = Transaction.refund(transaction.id, Decimal("500.00")).transaction
self.assertEquals(Transaction.Type.Credit, refund2.type)
self.assertEquals(Decimal("500.00"), refund2.amount)
transaction = Transaction.find(transaction.id)
self.assertEquals(2, len(transaction.refund_ids))
self.assertTrue(TestHelper.in_list(transaction.refund_ids, refund1.id))
self.assertTrue(TestHelper.in_list(transaction.refund_ids, refund2.id))
def test_refund_already_refunded_transaction_fails(self):
transaction = self.__create_transaction_to_refund()
Transaction.refund(transaction.id)
result = Transaction.refund(transaction.id)
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.HasAlreadyBeenRefunded,
result.errors.for_object("transaction").on("base")[0].code
)
def test_refund_returns_an_error_if_unsettled(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"options": {
"submit_for_settlement": True
}
}).transaction
result = Transaction.refund(transaction.id)
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Transaction.CannotRefundUnlessSettled,
result.errors.for_object("transaction").on("base")[0].code
)
def __create_transaction_to_refund(self):
transaction = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"options": {
"submit_for_settlement": True
}
}).transaction
TestHelper.settle_transaction(transaction.id)
return transaction
def test_snapshot_add_ons_and_discounts_from_subscription(self):
credit_card = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
}).customer.credit_cards[0]
result = Subscription.create({
"payment_method_token": credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"add_ons": {
"add": [
{
"amount": Decimal("11.00"),
"inherited_from_id": "increase_10",
"quantity": 2,
"number_of_billing_cycles": 5
},
{
"amount": Decimal("21.00"),
"inherited_from_id": "increase_20",
"quantity": 3,
"number_of_billing_cycles": 6
}
]
},
"discounts": {
"add": [
{
"amount": Decimal("7.50"),
"inherited_from_id": "discount_7",
"quantity": 2,
"never_expires": True
}
]
}
})
transaction = result.subscription.transactions[0]
self.assertEquals(2, len(transaction.add_ons))
add_ons = sorted(transaction.add_ons, key=lambda add_on: add_on.id)
self.assertEquals("increase_10", add_ons[0].id)
self.assertEquals(Decimal("11.00"), add_ons[0].amount)
self.assertEquals(2, add_ons[0].quantity)
self.assertEquals(5, add_ons[0].number_of_billing_cycles)
self.assertFalse(add_ons[0].never_expires)
self.assertEquals("increase_20", add_ons[1].id)
self.assertEquals(Decimal("21.00"), add_ons[1].amount)
self.assertEquals(3, add_ons[1].quantity)
self.assertEquals(6, add_ons[1].number_of_billing_cycles)
self.assertFalse(add_ons[1].never_expires)
self.assertEquals(1, len(transaction.discounts))
discounts = transaction.discounts
self.assertEquals("discount_7", discounts[0].id)
self.assertEquals(Decimal("7.50"), discounts[0].amount)
self.assertEquals(2, discounts[0].quantity)
self.assertEquals(None, discounts[0].number_of_billing_cycles)
self.assertTrue(discounts[0].never_expires)
def test_descriptors_accepts_name_and_phone(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"descriptor": {
"name": "123*123456789012345678",
"phone": "3334445555"
}
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEquals("123*123456789012345678", transaction.descriptor.name)
self.assertEquals("3334445555", transaction.descriptor.phone)
def test_descriptors_has_validation_errors_if_format_is_invalid(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"descriptor": {
"name": "badcompanyname12*badproduct12",
"phone": "%bad4445555"
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Descriptor.NameFormatIsInvalid,
result.errors.for_object("transaction").for_object("descriptor").on("name")[0].code
)
self.assertEquals(
ErrorCodes.Descriptor.PhoneFormatIsInvalid,
result.errors.for_object("transaction").for_object("descriptor").on("phone")[0].code
)
__author__ = 'casey'
import ast
import os
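# Addresses round-trip through three representations:
# * a dict-literal string parsed with ast.literal_eval (from_str / from_dict)
# * a ":::"-delimited database string (get_db_str / from_db_str)
# * a plain tuple whose first element names the concrete class (as_tuple / from_tuple)
# AddressFactory inspects the type tag and dispatches to the matching class.
# (Note: only BrickFileAddress implements the db-string codec below; the
# factory's other from_db_str branches assume matching classmethods exist.)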
class AddressFactory(object):
@staticmethod
def from_db_str(st):
try:
if len(st) > 0 and ':::' in st:
s = st.split(":::")
if s[0] == BrickAddress.__name__:
return BrickAddress.from_db_str(st)
elif s[0] == BrickFileAddress.__name__:
return BrickFileAddress.from_db_str(st)
elif s[0] == FileAddress.__name__:
return FileAddress.from_db_str(st)
elif s[0] == Address.__name__:
return Address.from_db_str(st)
elif len(st) > 0:
return Address(st)
except Exception:
pass
raise ValueError("Do not know how to build address from string: %s" % st)
@staticmethod
def from_str(st):
try:
d = ast.literal_eval(st)
if isinstance(d, dict):
if 'type' in d:
t = d['type']
if t == BrickAddress.__name__:
return BrickAddress.from_dict(d)
elif t == BrickFileAddress.__name__:
return BrickFileAddress.from_dict(d)
elif t == FileAddress.__name__:
return FileAddress.from_dict(d)
elif t == Address.__name__:
return Address.from_dict(d)
except Exception:
pass
raise ValueError("Do not know how to build address from string: %s" % st)
@staticmethod
def from_tuple(tup):
if len(tup) > 1:
address_type = tup[0]
if address_type == Address.__name__:
return Address.from_tuple(tup)
elif address_type == FileAddress.__name__:
return FileAddress.from_tuple(tup)
elif address_type == BrickAddress.__name__:
return BrickAddress.from_tuple(tup)
elif address_type == BrickFileAddress.__name__:
return BrickFileAddress.from_tuple(tup)
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
class Address(object):
def __init__(self, coverage_uid):
self.coverage_uid = coverage_uid
def as_dict(self):
return {'type': Address.__name__,
'coverage_uid': self.coverage_uid}
@staticmethod
def from_dict(dic):
if 'type' in dic and dic['type'] == Address.__name__:
if 'coverage_uid' in dic:
return Address(dic['coverage_uid'])
raise ValueError("Do not know how to build address from %s ", str(dic))
def as_tuple(self):
tup = "Address", self.coverage_uid
return tup
@staticmethod
def from_tuple(tup):
if len(tup) != 2:
raise ValueError("".join(["Expected tuple of size 2. Found ", str(tup)]))
if tup[0] == "Address":
return Address(tup[1])
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
@staticmethod
def from_str(st):
return Address.from_dict(ast.literal_eval(st))
def get_top_level_key(self):
return self.coverage_uid
def __lt__(self, other):
return self.__key__() < other.__key__()
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __key__(self):
# stringified dict: hashable for __hash__ and orderable for __lt__
return str(self.as_dict())
def __hash__(self):
return hash(self.__key__())
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.__key__())
class IDAddress(Address):
def __init__(self, id):
Address.__init__(self, id)
self.id = id
def as_dict(self):
return {'type': IDAddress.__name__,
'id': self.id}
@staticmethod
def from_dict(dic):
if 'type' in dic and dic['type'] == IDAddress.__name__:
if 'id' in dic:
return IDAddress(dic['id'])
raise ValueError("Do not now how to build %s from %s" % (IDAddress.__name__, str(dic)))
def as_tuple(self):
tup = IDAddress.__name__, self.id
return tup
@staticmethod
def from_tuple(tup):
if len(tup) != 2:
raise ValueError("".join(["Expected tuple of size 2. Found ", str(tup)]))
if tup[0] == IDAddress.__name__:
return IDAddress(tup[1])
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
@staticmethod
def from_str(st):
return IDAddress.from_dict(ast.literal_eval(st))
def get_top_level_key(self):
return self.id
class FileAddress(Address):
def __init__(self, coverage_uid, file_path, begin=0, end=-1, validate=False):
Address.__init__(self, coverage_uid)
if validate:
if not os.path.exists(file_path):
raise ValueError("".join(["File does not exist at path: ", file_path]))
self.file_path = file_path
self.begin = begin
self.end = end
def as_dict(self):
return {'type': FileAddress.__name__,
'coverage_uid': self.coverage_uid,
'file_path': self.file_path,
'begin': self.begin,
'end': self.end}
@staticmethod
def from_dict(dic):
if 'type' in dic and dic['type'] == FileAddress.__name__:
if 'coverage_uid' in dic and 'file_path' in dic and 'begin' in dic and 'end' in dic:
return FileAddress(dic['coverage_uid'], dic['file_path'], dic['begin'], dic['end'])
raise ValueError("Do not know how to build address from %s ", str(dic))
def as_tuple(self):
tup = "FileAddress", self.coverage_uid, self.file_path, self.begin, self.end
return tup
@staticmethod
def from_tuple(tup):
if len(tup) != 5:
raise ValueError("".join(["Expected tuple of size 5. Found ", str(tup)]))
if tup[0] == "FileAddress":
return FileAddress(tup[1], tup[2], tup[3], tup[4])
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
@staticmethod
def from_str(st):
return FileAddress.from_dict(ast.literal_eval(st))
def get_top_level_key(self):
return self.file_path
class BrickAddress(Address):
def __init__(self, coverage_uid, brick_id, brick_slice):
Address.__init__(self, coverage_uid)
self.brick_id = brick_id
self.brick_slice = brick_slice
def as_tuple(self):
tup = "BrickAddress", self.coverage_uid, self.brick_id, self.brick_slice
return tup
def as_dict(self):
return {'type': BrickAddress.__name__,
'coverage_uid': self.coverage_uid,
'brick_id': self.brick_id,
'brick_slice': self.brick_slice}
@staticmethod
def from_dict(dic):
if 'type' in dic and dic['type'] == BrickAddress.__name__:
if 'coverage_uid' in dic and 'brick_id' in dic and 'brick_slice' in dic:
return BrickAddress(dic['coverage_uid'], dic['brick_id'], dic['brick_slice'])
raise ValueError("Do not know how to build address from %s ", str(dic))
@staticmethod
def from_tuple(tup):
if len(tup) != 4:
raise ValueError("".join(["Expected tuple of size 4. Found ", str(tup)]))
if tup[0] == "BrickAddress":
return BrickAddress(tup[1], tup[2], tup[3])
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
@staticmethod
def from_str(st):
return BrickAddress.from_dict(ast.literal_eval(st))
def get_top_level_key(self):
return self.coverage_uid, self.brick_id
class BrickFileAddress(Address):
def __init__(self, coverage_uid, brick_id):
Address.__init__(self, coverage_uid)
self.brick_id = brick_id
def as_tuple(self):
tup = "BrickFileAddress", self.coverage_uid, self.brick_id
return tup
def as_dict(self):
return {'type': BrickFileAddress.__name__,
'coverage_uid': self.coverage_uid,
'brick_id': self.brick_id}
@staticmethod
def from_dict(dic):
if 'type' in dic and dic['type'] == BrickFileAddress.__name__:
if 'coverage_uid' in dic and 'brick_id' in dic:
return BrickFileAddress(dic['coverage_uid'], dic['brick_id'])
raise ValueError("Do not know how to build address from %s ", str(dic))
@staticmethod
def from_tuple(tup):
if len(tup) != 3:
raise ValueError("".join(["Expected tuple of size 5. Found ", str(tup)]))
if tup[0] == "BrickFileAddress":
return BrickFileAddress(tup[1], tup[2])
else:
raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
def get_db_str(self):
return ''.join([BrickFileAddress.__name__, ':::',
self.coverage_uid, ':::', self.brick_id])
@staticmethod
def from_db_str(db_str):
try:
tp, cov_id, brick_id = db_str.split(":::")
if tp == BrickFileAddress.__name__:
return BrickFileAddress(cov_id, brick_id)
except Exception:
pass
raise ValueError("Do not know how to build address from %s" % db_str)
@staticmethod
def from_str(st):
return BrickFileAddress.from_dict(ast.literal_eval(st))
def get_top_level_key(self):
return self.coverage_uid + "::" + self.brick_id
def __lt__(self, other):
return self.__key__() < other.__key__()
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return not self.__eq__(other)
def __key__(self):
return str(self.as_dict())
def __hash__(self):
return hash(self.__key__())
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.__key__())
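if __name__ == '__main__':
# Minimal round-trip sketch (illustrative only; not part of the original
# module). It exercises just the APIs defined above: get_db_str, as_dict,
# as_tuple and their corresponding parsers on BrickFileAddress.
addr = BrickFileAddress("coverage-1", "brick-7")
assert AddressFactory.from_db_str(addr.get_db_str()) == addr
assert BrickFileAddress.from_str(str(addr.as_dict())) == addr
assert BrickFileAddress.from_tuple(addr.as_tuple()) == addr
print("BrickFileAddress round-trip OK")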
import pytest
import numpy as np
import openpnm as op
from numpy.testing import assert_allclose
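# Tests for op.phases.MultiPhase: per-phase occupancy bookkeeping, automatic
# throat occupancy (mean/max/min of the two adjacent pores), occupancy-weighted
# pore properties drawn from the constituent phases, and binary partition
# coefficients at phase-phase interface throats.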
class MultiPhaseTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[10, 10, 10])
self.water = op.phases.Water(network=self.net, name="water")
self.air = op.phases.Air(network=self.net, name="air")
self.oil = op.phases.Water(network=self.net, name="oil")
def test_multiphase_init(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
assert np.all(m['pore.occupancy.all'] == 0.0)
assert np.all(m['throat.occupancy.all'] == 0.0)
assert self.air.name in m.settings['phases']
assert self.water.name in m.settings['phases']
def test_multiphase_no_occupancy_yet(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
self.water['pore.temperature'] = 300
assert np.all(self.water['pore.temperature'] == 300)
with pytest.raises(Exception):
m['pore.temperature']
def test_multiphase_set_occupancy_w_indices_only(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
Ps_water = np.array([0, 1, 2])
Ps_water_mask = self.net.tomask(pores=Ps_water)
Ts_water = np.array([4, 12, 22])
Ts_water_mask = self.net.tomask(throats=Ts_water)
m.set_occupancy(self.water, pores=Ps_water, throats=Ts_water)
assert m["pore.occupancy.water"][Ps_water_mask].mean() == 1
assert m["pore.occupancy.water"][~Ps_water_mask].mean() == 0
assert m["throat.occupancy.water"][Ts_water_mask].mean() == 1
assert m["throat.occupancy.water"][~Ts_water_mask].mean() == 0
def test_multiphase_set_occupancy_w_values_only(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
Pvals = np.array([0.5, 0.9, 0.01])
Tvals = np.array([0.9, 0.01])
# Pvals must be Np-long if not accompanied by "pores" argument
with pytest.raises(Exception):
m.set_occupancy(self.water, Pvals=Pvals)
# Tvals must be Nt-long if not accompanied by "throats" argument
with pytest.raises(Exception):
m.set_occupancy(self.water, Tvals=Tvals)
# Set pore occupancy
m.set_occupancy(self.water, Pvals=1)
assert m["pore.occupancy.water"].mean() == 1
Pvals = np.ones(self.net.Np) * 0.5
m.set_occupancy(self.water, Pvals=Pvals)
assert m["pore.occupancy.water"].mean() == 0.5
# Setting throat occupancy
m.set_occupancy(self.water, Tvals=1)
assert m["throat.occupancy.water"].mean() == 1
Tvals = np.ones(self.net.Nt) * 0.54
m.set_occupancy(self.water, Tvals=Tvals)
assert m["throat.occupancy.water"].mean() == 0.54
def test_multiphase_set_occupancy_w_pore_indices_and_Pvals(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
Ps_water = np.array([0, 1, 2])
Pvals = np.array([0.5, 0.9, 0.01])
Ps_water_mask = self.net.tomask(Ps_water)
Ts_water = np.array([4, 12, 22])
Ts_water_mask = self.net.tomask(throats=Ts_water)
Tvals = np.array([0.3, 0.4, 0.1])
# Pvals/Tvals and pores/throats; same array length
m.set_occupancy(self.water, pores=Ps_water, Pvals=Pvals,
throats=Ts_water, Tvals=Tvals)
assert_allclose(m["pore.occupancy.water"][Ps_water], Pvals)
assert_allclose(m["throat.occupancy.water"][Ts_water], Tvals)
assert m["pore.occupancy.water"][~Ps_water_mask].mean() == 0
assert m["throat.occupancy.water"][~Ts_water_mask].mean() == 0
# Pvals and pores; inconsistent size
with pytest.raises(Exception):
m.set_occupancy(self.water, pores=[1, 5, 10], Pvals=[0.5, 0])
# Tvals and throats; inconsistent size
with pytest.raises(Exception):
m.set_occupancy(self.water, throats=[10, 52, 0], Tvals=[0.5, 0.01])
# Pvals/Tvals.size = 1 and pores/throats.size > 1
m.set_occupancy(self.water, pores=Ps_water, Pvals=0.25,
throats=Ts_water, Tvals=0.23)
assert m["pore.occupancy.water"][Ps_water].mean() == 0.25
assert m["throat.occupancy.water"][Ts_water].mean() == 0.23
def test_multiphase_automatic_throat_occupancy(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.air,
self.water])
pores = np.random.choice(self.net.Ps, size=100, replace=False)
Pvals = np.random.random(pores.size)
m.set_occupancy(self.water, pores=pores, Pvals=Pvals)
P1, P2 = self.net["throat.conns"].T
oc1, oc2 = [m["pore.occupancy.water"][x] for x in (P1, P2)]
# Throats take average occupancy of adjacent pores
m._set_automatic_throat_occupancy(mode="mean")
assert_allclose(m["throat.occupancy.water"], (oc1+oc2)/2)
# Throats take maximum occupancy of adjacent pores
m._set_automatic_throat_occupancy(mode="max")
assert_allclose(m["throat.occupancy.water"], np.maximum(oc1, oc2))
# Throats take minimum occupancy of adjacent pores
m._set_automatic_throat_occupancy(mode="min")
assert_allclose(m["throat.occupancy.water"], np.minimum(oc1, oc2))
def test_multiphase_occupancy_set_single_phase(self):
m = op.phases.MultiPhase(network=self.net)
self.water['pore.temperature'] = 300
assert np.all(self.water['pore.temperature'] == 300)
m.set_occupancy(phase=self.water, Pvals=1, Tvals=1)
assert np.all(m['pore.temperature'] == 300)
def test_multiphase_occupancy_set_two_phase(self):
m = op.phases.MultiPhase(network=self.net)
self.water['pore.temperature'] = 300
self.air['pore.temperature'] = 200
assert np.all(self.water['pore.temperature'] == 300)
assert np.all(self.air['pore.temperature'] == 200)
Ps = self.net['pore.coords'][:, 0] < 3
Ts = self.net.tomask(throats=self.net.find_neighbor_throats(Ps))
m.set_occupancy(phase=self.water, Pvals=Ps, Tvals=Ts)
m.set_occupancy(phase=self.air, Pvals=~Ps, Tvals=~Ts)
assert np.all(m['pore.temperature'] >= 200)
assert np.all(m['pore.temperature'] <= 300)
def test_multiphase_partition_coef(self):
m = op.phases.MultiPhase(network=self.net,
phases=[self.water, self.air, self.oil])
x, y, z = self.net["pore.coords"].T
ps_water = self.net.Ps[(y <= 3) + (y >= 8)]
ps_air = self.net.Ps[(y > 3) * (y < 6)]
ps_oil = self.net.Ps[(y >= 6) * (y < 8)]
# Phase arrangement (y-axis): W | A | O | W
m.set_occupancy(phase=self.water, pores=ps_water)
m.set_occupancy(phase=self.air, pores=ps_air)
m.set_occupancy(phase=self.oil, pores=ps_oil)
const = op.models.misc.constant
K_air_water = 2.0
K_air_oil = 1.8
K_water_oil = 0.73
m.set_binary_partition_coef(propname="throat.partition_coef",
phases=[self.air, self.water],
model=const, value=K_air_water)
m.set_binary_partition_coef(propname="throat.partition_coef",
phases=[self.air, self.oil],
model=const, value=K_air_oil)
m.set_binary_partition_coef(propname="throat.partition_coef",
phases=[self.water, self.oil],
model=const, value=K_water_oil)
K_aw = m["throat.partition_coef.air:water"]
K_ao = m["throat.partition_coef.air:oil"]
K_wo = m["throat.partition_coef.water:oil"]
K_global = m["throat.partition_coef.all"]
assert np.isclose(K_aw.mean(), K_air_water)
assert np.isclose(K_ao.mean(), K_air_oil)
assert np.isclose(K_wo.mean(), K_water_oil)
# Get water-air interface throats
tmp1 = self.net.find_neighbor_throats(ps_water, mode="xor")
tmp2 = self.net.find_neighbor_throats(ps_air, mode="xor")
Ts_water_air_interface = np.intersect1d(tmp1, tmp2)
# Get air-oil interface throats
tmp1 = self.net.find_neighbor_throats(ps_air, mode="xor")
tmp2 = self.net.find_neighbor_throats(ps_oil, mode="xor")
Ts_air_oil_interface = np.intersect1d(tmp1, tmp2)
# Get oil-water interface throats
tmp1 = self.net.find_neighbor_throats(ps_oil, mode="xor")
tmp2 = self.net.find_neighbor_throats(ps_water, mode="xor")
Ts_oil_water_interface = np.intersect1d(tmp1, tmp2)
# K_global for water-air interface must be 1/K_air_water
assert np.isclose(K_global[Ts_water_air_interface].mean(), 1/K_air_water)
# K_global for air-oil interface must be K_air_oil (not 1/K_air_oil)
assert np.isclose(K_global[Ts_air_oil_interface].mean(), K_air_oil)
# K_global for oil-water interface must be 1/K_water_oil
assert np.isclose(K_global[Ts_oil_water_interface].mean(), 1/K_water_oil)
# K_global for single-phase regions must be 1.0
interface_throats = np.hstack((Ts_water_air_interface,
Ts_air_oil_interface,
Ts_oil_water_interface))
Ts_single_phase = np.setdiff1d(self.net.Ts, interface_throats)
assert np.isclose(K_global[Ts_single_phase].mean(), 1.0)
def test_multiphase_invalid_phase(self):
pn = op.network.Cubic(shape=[3, 3, 3])
water = op.phases.Water(network=pn)
m = op.phases.MultiPhase(network=self.net)
with pytest.raises(Exception):
m.set_occupancy(phase=water)
def test_multiphase_invalid_occupancy(self):
m = op.phases.MultiPhase(network=self.net, phases=[self.water, self.air])
# Ideally the next line should raise an Exception; for now it only warns
m.set_occupancy(phase=self.water, Pvals=1.5, Tvals=2.5)
if __name__ == '__main__':
t = MultiPhaseTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import sql
from keystone import exception
from keystone.i18n import _
class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin):
"""project-endpoint relationship table."""
__tablename__ = 'project_endpoint'
attributes = ['endpoint_id', 'project_id']
endpoint_id = sql.Column(sql.String(64),
primary_key=True,
nullable=False)
project_id = sql.Column(sql.String(64),
primary_key=True,
nullable=False)
class EndpointGroup(sql.ModelBase, sql.ModelDictMixin):
"""Endpoint Groups table."""
__tablename__ = 'endpoint_group'
attributes = ['id', 'name', 'description', 'filters']
mutable_attributes = frozenset(['name', 'description', 'filters'])
id = sql.Column(sql.String(64), primary_key=True)
name = sql.Column(sql.String(255), nullable=False)
description = sql.Column(sql.Text, nullable=True)
filters = sql.Column(sql.JsonBlob(), nullable=False)
class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin):
"""Project to Endpoint group relationship table."""
__tablename__ = 'project_endpoint_group'
attributes = ['endpoint_group_id', 'project_id']
endpoint_group_id = sql.Column(sql.String(64),
sql.ForeignKey('endpoint_group.id'),
nullable=False)
project_id = sql.Column(sql.String(64), nullable=False)
__table_args__ = (sql.PrimaryKeyConstraint('endpoint_group_id',
'project_id'), {})
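# Schema sketch of the three tables above (for orientation; not executed):
#   project_endpoint        (endpoint_id PK, project_id PK)
#   endpoint_group          (id PK, name NOT NULL, description, filters JSON)
#   project_endpoint_group  (endpoint_group_id PK, FK -> endpoint_group.id,
#                            project_id PK)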
class EndpointFilter(object):
@sql.handle_conflicts(conflict_type='project_endpoint')
def add_endpoint_to_project(self, endpoint_id, project_id):
session = sql.get_session()
with session.begin():
endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id,
project_id=project_id)
session.add(endpoint_filter_ref)
def _get_project_endpoint_ref(self, session, endpoint_id, project_id):
endpoint_filter_ref = session.query(ProjectEndpoint).get(
(endpoint_id, project_id))
if endpoint_filter_ref is None:
msg = _('Endpoint %(endpoint_id)s not found in project '
'%(project_id)s') % {'endpoint_id': endpoint_id,
'project_id': project_id}
raise exception.NotFound(msg)
return endpoint_filter_ref
def check_endpoint_in_project(self, endpoint_id, project_id):
session = sql.get_session()
self._get_project_endpoint_ref(session, endpoint_id, project_id)
def remove_endpoint_from_project(self, endpoint_id, project_id):
session = sql.get_session()
endpoint_filter_ref = self._get_project_endpoint_ref(
session, endpoint_id, project_id)
with session.begin():
session.delete(endpoint_filter_ref)
def list_endpoints_for_project(self, project_id):
session = sql.get_session()
query = session.query(ProjectEndpoint)
query = query.filter_by(project_id=project_id)
endpoint_filter_refs = query.all()
return [ref.to_dict() for ref in endpoint_filter_refs]
def list_projects_for_endpoint(self, endpoint_id):
session = sql.get_session()
query = session.query(ProjectEndpoint)
query = query.filter_by(endpoint_id=endpoint_id)
endpoint_filter_refs = query.all()
return [ref.to_dict() for ref in endpoint_filter_refs]
def delete_association_by_endpoint(self, endpoint_id):
session = sql.get_session()
with session.begin():
query = session.query(ProjectEndpoint)
query = query.filter_by(endpoint_id=endpoint_id)
query.delete(synchronize_session=False)
def delete_association_by_project(self, project_id):
session = sql.get_session()
with session.begin():
query = session.query(ProjectEndpoint)
query = query.filter_by(project_id=project_id)
query.delete(synchronize_session=False)
def create_endpoint_group(self, endpoint_group_id, endpoint_group):
session = sql.get_session()
with session.begin():
endpoint_group_ref = EndpointGroup.from_dict(endpoint_group)
session.add(endpoint_group_ref)
return endpoint_group_ref.to_dict()
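    # Note: the separate endpoint_group_id argument mirrors the driver
    # interface; the new row's id is expected to arrive inside the
    # endpoint_group dict consumed by from_dict() above.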
def _get_endpoint_group(self, session, endpoint_group_id):
endpoint_group_ref = session.query(EndpointGroup).get(
endpoint_group_id)
if endpoint_group_ref is None:
raise exception.EndpointGroupNotFound(
endpoint_group_id=endpoint_group_id)
return endpoint_group_ref
def get_endpoint_group(self, endpoint_group_id):
session = sql.get_session()
endpoint_group_ref = self._get_endpoint_group(session,
endpoint_group_id)
return endpoint_group_ref.to_dict()
def update_endpoint_group(self, endpoint_group_id, endpoint_group):
session = sql.get_session()
with session.begin():
endpoint_group_ref = self._get_endpoint_group(session,
endpoint_group_id)
old_endpoint_group = endpoint_group_ref.to_dict()
old_endpoint_group.update(endpoint_group)
new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group)
for attr in EndpointGroup.mutable_attributes:
setattr(endpoint_group_ref, attr,
getattr(new_endpoint_group, attr))
return endpoint_group_ref.to_dict()
def delete_endpoint_group(self, endpoint_group_id):
session = sql.get_session()
endpoint_group_ref = self._get_endpoint_group(session,
endpoint_group_id)
with session.begin():
self._delete_endpoint_group_association_by_endpoint_group(
session, endpoint_group_id)
session.delete(endpoint_group_ref)
def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
session = sql.get_session()
ref = self._get_endpoint_group_in_project(session,
endpoint_group_id,
project_id)
return ref.to_dict()
@sql.handle_conflicts(conflict_type='project_endpoint_group')
def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
session = sql.get_session()
with session.begin():
# Create a new Project Endpoint group entity
endpoint_group_project_ref = ProjectEndpointGroupMembership(
endpoint_group_id=endpoint_group_id, project_id=project_id)
session.add(endpoint_group_project_ref)
def _get_endpoint_group_in_project(self, session,
endpoint_group_id, project_id):
endpoint_group_project_ref = session.query(
ProjectEndpointGroupMembership).get((endpoint_group_id,
project_id))
if endpoint_group_project_ref is None:
msg = _('Endpoint Group Project Association not found')
raise exception.NotFound(msg)
else:
return endpoint_group_project_ref
def list_endpoint_groups(self):
session = sql.get_session()
query = session.query(EndpointGroup)
endpoint_group_refs = query.all()
return [e.to_dict() for e in endpoint_group_refs]
def list_endpoint_groups_for_project(self, project_id):
session = sql.get_session()
query = session.query(ProjectEndpointGroupMembership)
query = query.filter_by(project_id=project_id)
endpoint_group_refs = query.all()
return [ref.to_dict() for ref in endpoint_group_refs]
def remove_endpoint_group_from_project(self, endpoint_group_id,
project_id):
session = sql.get_session()
endpoint_group_project_ref = self._get_endpoint_group_in_project(
session, endpoint_group_id, project_id)
with session.begin():
session.delete(endpoint_group_project_ref)
def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
session = sql.get_session()
query = session.query(ProjectEndpointGroupMembership)
query = query.filter_by(endpoint_group_id=endpoint_group_id)
endpoint_group_refs = query.all()
return [ref.to_dict() for ref in endpoint_group_refs]
def _delete_endpoint_group_association_by_endpoint_group(
self, session, endpoint_group_id):
query = session.query(ProjectEndpointGroupMembership)
query = query.filter_by(endpoint_group_id=endpoint_group_id)
query.delete()
def delete_endpoint_group_association_by_project(self, project_id):
session = sql.get_session()
with session.begin():
query = session.query(ProjectEndpointGroupMembership)
query = query.filter_by(project_id=project_id)
query.delete()
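# Usage sketch (illustrative only; ``driver`` would be an EndpointFilter
# mixed into a backend that provides ``sql.get_session``):
#
#     driver.add_endpoint_to_project('ENDPOINT_ID', 'PROJECT_ID')
#     refs = driver.list_endpoints_for_project('PROJECT_ID')
#     driver.remove_endpoint_from_project('ENDPOINT_ID', 'PROJECT_ID')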
|
|
from ScopeFoundry.data_browser import DataBrowserView
import numpy as np
import h5py
import pyqtgraph as pg
import pyqtgraph.dockarea as dockarea
from qtpy import QtWidgets, QtGui
from scipy import optimize
from scipy.ndimage.filters import gaussian_filter, correlate1d
from scipy.signal import savgol_filter
from scipy import interpolate
import sys
import time
#sys.path.insert(0, '/home/dbdurham/foundry_scope/FoundryDataBrowser/viewers')
from .drift_correction import register_translation_hybrid, shift_subpixel, \
compute_pairwise_shifts, compute_retained_box, align_image_stack
class AugerSpecMapView(DataBrowserView):
name = 'auger_spec_map'
def setup(self):
self.data_loaded = False
self.settings.New('drift_correct_type', dtype=str, initial='Pairwise', choices=('Pairwise','Pairwise + Running Avg'))
self.settings.New('drift_correct_adc_chan', dtype=int)
self.settings.New('drift_correct', dtype=bool)
self.settings.New('overwrite_alignment', dtype=bool)
# if drift corrected datasets already exist, overwrite?
self.settings.New('run_preprocess', dtype=bool)
self.settings.get_lq('run_preprocess').add_listener(self.preprocess)
self.settings.New('use_preprocess', dtype=bool, initial=True)
self.settings.get_lq('use_preprocess').add_listener(self.load_new_data)
self.settings.New('update_auger_map', dtype=bool, initial=False)
self.settings.get_lq('update_auger_map').add_listener(self.update_current_auger_map)
self.settings.New('equalize_detectors', dtype=bool)
self.settings.New('normalize_by_pass_energy', dtype=bool)
self.settings.New('spatial_smooth_sigma', dtype=float, vmin=0.0)
self.settings.New('spectral_smooth_type', dtype=str, choices=['None', 'Gaussian', 'Savitzky-Golay'], initial='None')
self.settings.New('spectral_smooth_gauss_sigma', dtype=float, vmin=0.0)
self.settings.New('spectral_smooth_savgol_width', dtype=int, vmin=0)
self.settings.New('spectral_smooth_savgol_order', dtype=int, vmin=0, initial=2)
# Assume same tougaard parameters everywhere
self.settings.New('subtract_tougaard', dtype=bool)
self.settings.New('R_loss', dtype=float)
self.settings.New('E_loss', dtype=float)
auger_lqs = ['equalize_detectors', 'normalize_by_pass_energy',
'spatial_smooth_sigma','spectral_smooth_type',
'spectral_smooth_gauss_sigma','spectral_smooth_savgol_width',
'spectral_smooth_savgol_order', 'subtract_tougaard',
'R_loss', 'E_loss']
# Link all the auger spectrum lqs to the update current auger map listener
for alq in auger_lqs:
self.settings.get_lq(alq).add_listener(self.update_current_auger_map)
self.settings.New('ke0_start', dtype=float)
self.settings.New('ke0_stop', dtype=float)
self.settings.New('ke1_start', dtype=float)
self.settings.New('ke1_stop', dtype=float)
for lqname in ['ke0_start', 'ke0_stop', 'ke1_start', 'ke1_stop']:
self.settings.get_lq(lqname).add_listener(self.on_change_ke_settings)
# Subtract the B section (ke1_start through ke1_stop) by a power law fit
self.settings.New('subtract_ke1', dtype=str, choices=['None','Linear','Power Law'], initial='None')
self.settings.get_lq('subtract_ke1').add_listener(self.update_current_auger_map)
#Math mode now updates automatically on change
self.settings.New('math_mode', dtype=str, initial='A')
self.settings.get_lq('math_mode').add_listener(self.on_change_math_mode)
self.settings.New('AB_mode', dtype=str, choices=['Mean', 'Integral'], initial='Mean')
self.settings.get_lq('AB_mode').add_listener(self.on_change_ke_settings)
self.settings.New('spectrum_over_ROI', dtype=bool)
self.settings.get_lq('spectrum_over_ROI').add_listener(self.on_change_spectrum_over_ROI)
self.settings.New('analysis_over_spectrum', dtype=bool)
self.settings.get_lq('analysis_over_spectrum').add_listener(self.update_current_auger_map)
self.settings.New('mean_spectrum_only', dtype=bool, initial=False)
self.settings.get_lq('mean_spectrum_only').add_listener(self.on_change_mean_spectrum_only)
# Make plots on white background
#pg.setConfigOption('background', 'w')
#pg.setConfigOption('foreground', 'k')
self.ui = self.dockarea = dockarea.DockArea()
# List of settings to include in preprocessing tab
names_prep = ['drift_correct_type','drift_correct_adc_chan', 'drift_correct',
'overwrite_alignment', 'run_preprocess']
self.setdock = self.dockarea.addDock(name='Settings', position='left',
widget=self.settings.New_UI(exclude=names_prep))
self.prepdock = self.dockarea.addDock(name='Preprocess', position='left',
widget=self.settings.New_UI(include=names_prep))
self.dockarea.moveDock(self.setdock, 'above', self.prepdock)
# Images
self.imview_sem0_stack = pg.ImageView()
self.imview_sem0_stack.getView().invertY(False) # lower left origin
self.imdockA_stack = self.dockarea.addDock(name='SE2 Image Stack', widget=self.imview_sem0_stack)
self.imview_sem1_stack = pg.ImageView()
self.imview_sem1_stack.getView().invertY(False) # lower left origin
self.imdockB_stack = self.dockarea.addDock(name='InLens Image Stack', position='right', widget=self.imview_sem1_stack)
self.imview_sem0 = pg.ImageView()
self.imview_sem0.getView().invertY(False) # lower left origin
self.imdockA = self.dockarea.addDock(name='SE2 Mean Image', widget=self.imview_sem0)
self.imview_sem1 = pg.ImageView()
self.imview_sem1.getView().invertY(False) # lower left origin
self.imdockB = self.dockarea.addDock(name='InLens Mean Image', widget=self.imview_sem1)
self.imview_auger = pg.ImageView()
self.imview_auger.getView().invertY(False) # lower left origin
self.imdockAuger = self.dockarea.addDock(name='Auger Map', widget=self.imview_auger)
self.im_auger = self.imview_auger.getImageItem()
# tab image and auger map docks
self.dockarea.moveDock(self.imdockA_stack, 'above', self.imdockB_stack)
self.dockarea.moveDock(self.imdockB, 'above', self.imdockA_stack)
self.dockarea.moveDock(self.imdockA, 'above', self.imdockB)
self.dockarea.moveDock(self.imdockAuger, 'above', self.imdockA)
# Polygon ROI
self.poly_roi = pg.PolyLineROI([[20,0], [20,20], [0,20]], pen = pg.mkPen((255,0,0),dash=[5,5], width=1.5), closed=True)
#self.poly_roi = pg.RectROI([20, 20], [20, 20], pen=(0,9))
#self.poly_roi = pg.CircleROI((0,0), (10,10) , movable=True, pen=(0,9))
#self.poly_roi.addTranslateHandle((0.5,0.5))
self.imview_auger.getView().addItem(self.poly_roi)
self.poly_roi.sigRegionChanged[object].connect(self.on_change_roi)
# Scalebar ROI
self.scalebar = pg.LineROI([5, 5], [25, 5], width=0, pen = pg.mkPen(color=(255,255,0),width=4.5))
self.imview_auger.getView().addItem(self.scalebar)
# Create initial scalebar w/ text
self.scale_text = pg.TextItem(color=(255,255,0), anchor=(0.5,1))
self.imview_auger.getView().addItem(self.scale_text)
self.scale_text.setFont(pg.QtGui.QFont('Arial', pointSize = 11))
self.scalebar.sigRegionChanged[object].connect(self.on_change_scalebar)
# Change handle colors so they don't appear by default, but do when hovered over
scale_handles = self.scalebar.getHandles()
scale_handles[0].currentPen.setColor(pg.mkColor(255,255,255,0))
scale_handles[1].currentPen.setColor(pg.mkColor(255,255,255,0))
# This disables the middle handle that allows line width change
scale_handles[2].setOpacity(0.0)
# Spectrum plot
self.graph_layout = pg.GraphicsLayoutWidget()
self.spec_plot = self.graph_layout.addPlot()
self.legend = self.spec_plot.addLegend()
self.spec_plot.setLabel('bottom','Electron Kinetic Energy')
self.spec_plot.setLabel('left','Intensity (Hz)')
#self.rect_plotdata = self.spec_plot.plot()
#self.point_plotdata = self.spec_plot.plot(pen=(0,9))
self.dockarea.addDock(name='Spec Plot',position='bottom', widget=self.graph_layout)
self.lr0 = pg.LinearRegionItem(values=[0,1], brush=QtGui.QBrush(QtGui.QColor(0, 0, 255, 50)))
self.lr1 = pg.LinearRegionItem(values=[2,3], brush=QtGui.QBrush(QtGui.QColor(255, 0, 0, 50)))
for lr in (self.lr0, self.lr1):
lr.setZValue(10)
self.spec_plot.addItem(lr, ignoreBounds=True)
lr.sigRegionChangeFinished.connect(self.on_change_regions)
self.chan_plotlines = []
# define plotline color scheme going from orange -> yellow -> green
R = np.linspace(220,0,4)
G = np.linspace(220,100,4)
plot_colors = [(R[0], G[0], 0), (R[1], G[0], 0), (R[0], G[1], 0),
(R[2], G[0], 0), (R[0], G[2], 0), (R[3], G[0], 100),
(R[0], G[3], 0)]
for ii in range(7):
self.chan_plotlines.append(
self.spec_plot.plot([0], pen=pg.mkPen(color=plot_colors[ii],width=2), name='chan ' + str(ii), width=20))
self.total_plotline = self.spec_plot.plot(pen=pg.mkPen(color=(0,0,0), width=3), name='mean')
def is_file_supported(self, fname):
return "auger_sync_raster_scan.h5" in fname
def on_change_data_filename(self, fname=None):
if fname == "0":
return
try:
# FIX: Should close the h5 file that was previously open
# FIX: h5 file should also be closed on program close
self.data_loaded = False
self.fname = fname
print('opening hdf5 file...')
self.dat = h5py.File(self.fname, 'r+')
print('hdf5 file loaded')
self.H = self.dat['measurement/auger_sync_raster_scan/']
h = self.h_settings = dict(self.H['settings'].attrs)
self.h_range = (h['h0'], h['h1'])
self.v_range = (h['v0'], h['v1'])
self.nPixels = (h['Nh'], h['Nv'])
self.R = self.dat['hardware/sem_remcon/']
r = self.r_settings = self.R['settings'].attrs
self.full_size = r['full_size'] # in meters
#scan_shape = self.adc_map.shape[:-1]
# # Close the h5 dataset, everything is stored in current memory now
# self.dat.close()
self.data_loaded = True
self.load_new_data()
#self.update_display()
except Exception as err:
print(err)
self.imview_auger.setImage(np.zeros((10,10)))
self.databrowser.ui.statusbar.showMessage("Failed to load %s: %s" %(fname, err))
raise(err)
def preprocess(self):
if not self.data_loaded:
return
if self.settings['drift_correct']:
# Need to read existing datasets here in case of changes since loading file initially
h_datasets = list(self.H.keys())
            if self.settings['overwrite_alignment'] or ('auger_chan_map_aligned' not in h_datasets):
                # Remove whichever aligned datasets already exist before regenerating
                for name in ('adc_map_aligned', 'auger_chan_map_aligned'):
                    if name in h_datasets:
                        del self.H[name]
t0 = time.time()
self.drift_correct()
tdc = time.time()
print('Drift correct time', tdc-t0)
# refer to aligned data
self.adc_map_h5 = self.adc_map_aligned_h5
self.auger_map_h5 = self.auger_map_aligned_h5
else:
# refer to raw data
self.adc_map_h5 = self.adc_map_raw_h5
self.auger_map_h5 = self.auger_map_raw_h5
t0 = time.time()
# Update displays
self.imview_sem0_stack.setImage(np.transpose(self.adc_map_h5[:,:,:,:,0].mean(axis=1), (0,2,1)))
self.imview_sem1_stack.setImage(np.transpose(self.adc_map_h5[:,:,:,:,1].mean(axis=1), (0,2,1)))
self.imview_sem0.setImage(np.transpose(self.adc_map_h5[:,:,:,:,0].mean(axis=(0,1))))
self.imview_sem1.setImage(np.transpose(self.adc_map_h5[:,:,:,:,1].mean(axis=(0,1))))
tsup = time.time()
print('update display time', tsup-t0)
# Update analysis
self.update_current_auger_map()
tcam = time.time()
print('update auger map time', tcam-tsup)
def drift_correct(self):
t0 = time.time()
correct_chan = self.settings['drift_correct_adc_chan']
shift = compute_pairwise_shifts(self.adc_map_h5[:,0,:,:,correct_chan])
tps = time.time()
print('pairwise shifts time', tps-t0)
shift = np.concatenate([np.zeros((2,1)), shift],axis=1)
# Cumulative sum defines shift with respect to original image for each image
        # Maxima and minima of the cumulative x and y shifts define the box within which all images have defined pixels
shift_cumul = np.cumsum(shift, axis=1)
scan_shape_adc = self.adc_map_h5.shape
scan_shape_auger = self.auger_map_h5.shape
boxfd, boxdims = compute_retained_box(shift_cumul,
(scan_shape_adc[2], scan_shape_adc[3]))
trb = time.time()
print('retained box time', trb-tps)
align_shape_adc = (scan_shape_adc[0], scan_shape_adc[1], boxdims[0], boxdims[1], scan_shape_adc[4])
align_shape_auger = (scan_shape_auger[0], scan_shape_auger[1], boxdims[0], boxdims[1], scan_shape_auger[4])
self.adc_map_aligned_h5 = self.H.create_dataset('adc_map_aligned',
align_shape_adc,
self.adc_map_h5.dtype)
self.auger_map_aligned_h5 = self.H.create_dataset('auger_chan_map_aligned',
align_shape_auger,
self.auger_map_h5.dtype)
tdat = time.time()
print('create dataset time', tdat-trb)
# Shift images to align
for iFrame in range(0, scan_shape_adc[0]):
# Shift adc map
for iDet in range(0, align_shape_adc[-1]):
adc_shift = shift_subpixel(self.adc_map_h5[iFrame,0,:,:,iDet],
dx=shift_cumul[1, iFrame],
dy=shift_cumul[0, iFrame])
self.adc_map_aligned_h5[iFrame,0,:,:,iDet] = np.real(adc_shift[boxfd[0]:boxfd[1], boxfd[2]:boxfd[3]])
# Shift spectral data
for iDet in range(0, align_shape_auger[-1]):
auger_shift = shift_subpixel(self.auger_map_h5[iFrame,0,:,:,iDet],
dx=shift_cumul[1, iFrame],
dy=shift_cumul[0, iFrame])
self.auger_map_aligned_h5[iFrame,0,:,:,iDet] = np.real(auger_shift[boxfd[0]:boxfd[1], boxfd[2]:boxfd[3]])
talign = time.time()
print('align datasets time', talign-tdat)
# if self.settings['drift_correct_type'] == 'Pairwise + Running Avg':
# #### Phase 2: Running Average ####
#
# # Update the image shape
# imshape = imstack.shape
# imstack_run = imstack.copy()
# specstack_run = specstack.copy()
#
# # Prepare window function (Hann)
# win = np.outer(np.hanning(imshape[2]),np.hanning(imshape[3]))
#
# # Shifts to running average
# shift = np.zeros((2, num_frames))
# image = imstack[0,0,:,:,adc_chan]
# for iFrame in range(1, num_frames):
# offset_image = imstack[iFrame,0,:,:,adc_chan]
# # Calculate shift
# shift[:,iFrame], error, diffphase = register_translation_hybrid(image*win, offset_image*win, exponent = 0.3, upsample_factor = 100)
# # Perform shifts
# # Shift adc map
# for iDet in range(0, imstack.shape[4]):
# imstack_run[iFrame,0,:,:,iDet] = shift_subpixel(imstack[iFrame,0,:,:,iDet], dx = shift[1,iFrame], dy = shift[0, iFrame])
# # Shift spectral data
# for iDet in range(0, specstack.shape[4]):
# specstack_run[iFrame,0,:,:,iDet] = shift_subpixel(specstack[iFrame,0,:,:,iDet], dx = shift[1,iFrame], dy = shift[0, iFrame])
# # Update running average
# image = (iFrame/(iFrame+1)) * image + (1/(iFrame+1)) * imstack_run[iFrame,0,:,:,adc_chan]
# # Shifts are defined as [y, x] where y is shift of imaging location with respect to positive y axis, similarly for x
#
# # Determining coordinates of fully defined box for original image
#
# shift_y = shift[0,:]
# shift_x = shift[1,:]
#
# # NOTE: scan_shape indices 2, 3 correspond to y, x
# y1 = int(round(np.max(shift_y[shift_y >= 0])+0.001, 0))
# y2 = int(round(imshape[2] + np.min(shift_y[shift_y <= 0])-0.001, 0))
# x1 = int(round(np.max(shift_x[shift_x >= 0])+0.001, 0))
# x2 = int(round(imshape[3] + np.min(shift_x[shift_x <= 0])-0.001, 0))
#
# boxfd = np.array([y1, y2, x1, x2])
# boxdims = (boxfd[1]-boxfd[0], boxfd[3]-boxfd[2])
#
# # Keep only preserved data
# self.adc_map = np.real(imstack_run[:,:,boxfd[0]:boxfd[1], boxfd[2]:boxfd[3],:])
# self.auger_map = np.real(specstack_run[:,:,boxfd[0]:boxfd[1], boxfd[2]:boxfd[3],:])
# else:
# self.adc_map = imstack
# self.auger_map = specstack
def load_new_data(self):
if not self.data_loaded:
return
if self.settings['use_preprocess']:
h_datasets = list(self.H.keys())
if 'adc_map_prep' in h_datasets:
self.adc_map_h5 = self.H['adc_map_prep']
if 'auger_map_prep' in h_datasets:
self.auger_map_h5 = self.H['auger_map_prep']
if 'ke_prep' in h_datasets:
self.ke = self.H['ke_prep'][:]
else:
self.adc_map_h5 = self.H['adc_map']
self.auger_map_h5 = self.H['auger_chan_map']
self.ke = self.H['ke'][:]
# Calculate relative detector efficiencies
print('calculating detector efficiencies...')
self.calculate_detector_efficiencies()
# SEM image displays update
# FIX: Using auger stack for now to check correctness
print('setting SEM image stacks...')
self.imview_sem0_stack.setImage(np.transpose(self.auger_map_h5[:,:,:,:,0].mean(axis=1), (0,2,1)))
self.imview_sem1_stack.setImage(np.transpose(self.adc_map_h5[:,:,:,:,1].mean(axis=1), (0,2,1)))
print('setting SEM mean image...')
self.imview_sem0.setImage(np.transpose(self.auger_map_h5[:,:,:,:,0].mean(axis=(0,1))))
self.imview_sem1.setImage(np.transpose(self.adc_map_h5[:,:,:,:,1].mean(axis=(0,1))))
# Update Scale Bar
self.on_change_scalebar()
# Auger map and display update
print('updating auger map')
self.update_current_auger_map()
def on_change_ke_settings(self):
if not self.data_loaded:
return
print ("on_change_ke_settings")
S = self.settings
print('ke shape', self.ke.shape)
ke_map0 = (S['ke0_start'] < self.ke) * (self.ke < S['ke0_stop'])
ke_map1 = (S['ke1_start'] < self.ke) * (self.ke < S['ke1_stop'])
        # self.ke has shape (n_chans=7, n_frames)
        # auger map shape:
        # n_frames (0), n_subframes (1), n_y (2), n_x (3), n_chans (4)
# FIX: integral assumes all measured points count evenly towards integral,
# but depending on dispersion they may be clustered which calls into question
# the validity of this "integral" approximation
if ke_map0.sum() == 0:
self.A = np.zeros(self.current_auger_map.shape[2:4])
else:
auger_ke0_imgs = np.transpose(self.current_auger_map, (4,0,1,2,3))[ke_map0,0,:,:]
if self.settings['AB_mode'] == 'Mean':
self.A = auger_ke0_imgs.mean(axis=0)
elif self.settings['AB_mode'] == 'Integral':
deltaE = (S['ke0_stop'] - S['ke0_start'])/ke_map0.sum()
self.A = auger_ke0_imgs.sum(axis=0) * deltaE
if ke_map1.sum() == 0:
self.B = np.zeros(self.current_auger_map.shape[2:4])
else:
auger_ke1_imgs = np.transpose(self.current_auger_map, (4,0,1,2,3))[ke_map1,0,:,:]
if self.settings['AB_mode'] == 'Mean':
self.B = auger_ke1_imgs.mean(axis=0)
elif self.settings['AB_mode'] == 'Integral':
                deltaE = (S['ke1_stop'] - S['ke1_start'])/ke_map1.sum()
self.B = auger_ke1_imgs.sum(axis=0) * deltaE
# Stored these arrays in object so could be updated/manipulated on demand more easily
self.imview_auger.setImage(self.compute_image(self.A,self.B))
self.lr0.setRegion((S['ke0_start'], S['ke0_stop']))
self.lr1.setRegion((S['ke1_start'], S['ke1_stop']))
def on_change_math_mode(self):
if not self.data_loaded:
return
self.imview_auger.setImage(self.compute_image(self.A,self.B))
def on_change_roi(self):
if not self.data_loaded:
return
# Only need to update the spectrum if being calculated over ROI
if self.settings['spectrum_over_ROI']:
self.update_spectrum_display()
def on_change_scalebar(self):
if not self.data_loaded:
return
# Calculate the scale length (in pixels for now)
scale_length = self.scalebar.size().x()
# Calculate scale bar position and local midpoint
scalebar_pos = self.scalebar.pos()
midpoint_x = scalebar_pos.x() + 0.5 * scale_length * np.cos(np.deg2rad(self.scalebar.angle()))
midpoint_y = scalebar_pos.y() + 0.5 * scale_length * np.sin(np.deg2rad(self.scalebar.angle()))
        # Convert from pixels to meters
        # 1. Determine pixel size in meters (the /20.0 presumably maps the
        #    nominal 20-unit scan range onto the full field of view)
        scan_size_h = ((self.h_range[1]-self.h_range[0])/20.0) * self.full_size
        scan_size_v = ((self.v_range[1]-self.v_range[0])/20.0) * self.full_size
        pixel_size_h = scan_size_h/self.nPixels[0]
        pixel_size_v = scan_size_v/self.nPixels[1]
# 2. Convert scale lengths along x and y to meters
scale_length_x = pixel_size_h * scale_length * np.cos(np.deg2rad(self.scalebar.angle()))
scale_length_y = pixel_size_v * scale_length * np.sin(np.deg2rad(self.scalebar.angle()))
# 3. Calculate new magnitude
scale_length_m = np.sqrt(np.square(scale_length_x) + np.square(scale_length_y))
if scale_length_m < 1e-6:
scale_length_m *= 1e9
scale_unit = ' nm'
else:
scale_length_m *= 1e6
scale_unit = ' um'
# Update scalebar text and position
self.scale_text.setText(str(np.around(scale_length_m,decimals=1)) + scale_unit)
self.scale_text.setPos(midpoint_x, midpoint_y)
# FIX: setAngle doesn't seem to work on angles between 60 and 120 degrees? Bizarre...
self.scale_text.setAngle(self.scalebar.angle())
def on_change_regions(self):
if not self.data_loaded:
return
S = self.settings
S['ke0_start'], S['ke0_stop'] = self.lr0.getRegion()
S['ke1_start'], S['ke1_stop'] = self.lr1.getRegion()
def on_change_spectrum_over_ROI(self):
self.update_spectrum_display()
def on_change_subtract_ke1_powerlaw(self):
self.update_spectrum_display()
def on_change_mean_spectrum_only(self):
if not self.data_loaded:
return
print('mean_spectrum_only')
for ii in range(7):
self.chan_plotlines[ii].setVisible(not(self.settings['mean_spectrum_only']))
self.legend.setVisible(not(self.settings['mean_spectrum_only']))
def calculate_detector_efficiencies(self):
if not self.data_loaded:
return
# Step 1. Identify and extract data to compare
# Determine highest, lowest, and middle energy detectors
num_chans = self.ke.shape[0]
det_rank = np.argsort(self.ke[:,0])
det_low = det_rank[0]
det_med = det_rank[len(det_rank)//2]
det_high = det_rank[-1]
# KE endpoints
ke_lower = self.ke[det_high,0]
ke_upper = self.ke[det_low,-1]
ke_med_map = (self.ke[det_med,:] >= ke_lower) * (self.ke[det_med,:] <= ke_upper)
# Extract data from the reference
ke_med = self.ke[det_med, ke_med_map]
auger_map_sum = np.sum(self.auger_map_h5[:,0,:,:,0:7],axis=(1,2))
data = np.transpose(auger_map_sum)
data_med = data[det_med, ke_med_map]
# Extract KE and data for other detector spectra
ke_rest = np.delete(self.ke, (det_med), axis = 0)
data_rest = np.delete(data, (det_med), axis = 0)
ke_step = self.ke[0,1] - self.ke[0,0]
ke_map = (ke_rest > ke_med[0] - 0.00001) * (ke_rest < ke_med[-1] + (ke_step-0.00001))
ke_sliced = ke_rest[ke_map]
ke_sliced = ke_sliced.reshape((num_chans-1, len(ke_sliced)//(num_chans-1)))
data_sliced = data_rest[ke_map].reshape((num_chans-1, ke_sliced.shape[1]))
# Step 2. Interpolate
data_intp = np.array([np.interp(ke_med, ke_sliced[idet], data_sliced[idet]) for idet in range(0, num_chans-1)])
# Add row back into stack
data_join = np.insert(data_intp, det_med, data_med, axis=0)
# Step 3. Integrate
self.det_eff = np.sum(data_join, axis=1)/np.sum(data_join[det_med,:])
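    # det_eff (computed above) is each detector's integrated intensity over
    # the shared energy window, relative to the middle-energy reference
    # detector; update_current_auger_map divides by it to equalize channels.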
    def compute_image(self, A, B):
        if not self.data_loaded:
            return
        # math_mode holds a Python expression in the two band images, e.g.
        # 'A' or 'A-B'; it is eval'd with A and B in local scope.
        mm = self.settings['math_mode']
        return np.transpose(eval(mm))
def compute_total_spectrum0(self):
from scipy import interpolate
sum_Hz = self.current_auger_map[:,:,:,:,0].mean(axis=(1,2,3))
x0 = self.ke[0,:]
for i in range(1,7):
x = self.ke[i,:]
y=self.current_auger_map[:,:,:,:,i].mean(axis=(1,2,3))
ff = interpolate.interp1d(x,y,bounds_error=False)
sum_Hz += ff(x0)
return sum_Hz/7.0
    def compute_total_spectrum(self, data=None):
        # Interpolate each detector channel onto a common energy axis
        # (self.ke_interp) and accumulate the channels into self.total_spec.
        from scipy import interpolate
        n_frames = self.ke.shape[1]
        self.total_spec = np.zeros(n_frames, dtype=float)
        self.ke_interp = np.linspace(self.ke.min(), self.ke.max(), n_frames, dtype=float)
        num_chans = self.current_auger_map.shape[-1]
        for i in range(0, num_chans):
            x = self.ke[i, :]
            if data is None or data.size == 0:
                y = self.current_auger_map[:, :, :, :, i].mean(axis=(1, 2, 3))
            else:
                y = data[:, i]
            ff = interpolate.interp1d(x, y, bounds_error=False)
            self.total_spec += ff(self.ke_interp)
def update_current_auger_map(self):
if self.settings['update_auger_map']:
# Initialize copy of auger map in memory (current_auger_map)
# this copy will be modified throughout the analysis (needs to be float to handle the math)
auger_shape = self.auger_map_h5.shape
if auger_shape[-1] == 10:
self.current_auger_map = np.array(self.auger_map_h5[:,:,:,:,0:7], dtype='float')
# Convert to Hz
time_per_px = self.auger_map_h5[:,:,:,:,8:9]* 25e-9 # units of 25ns converted to seconds
self.current_auger_map /= time_per_px # auger map now in Hz
else:
self.current_auger_map = np.array(self.auger_map_h5, dtype='float')
# FIX: Auger map currently in counts since detectors have been summed but
# time channel was not stored
# Equalize detectors (does not apply to preprocess since detector averaging is already done)
if self.settings['equalize_detectors'] and not(self.settings['use_preprocess']):
self.current_auger_map /= self.det_eff
#normalize counts by spec resolution, Hz/eV
if self.settings['normalize_by_pass_energy']:
self.spec_plot.setLabel('left','Intensity (Hz/eV)')
spec_dispersion = 0.02 #Omicron SCA per-channel resolution/pass energy
if self.h_settings['CAE_mode']:
self.current_auger_map /= spec_dispersion * self.h_settings['pass_energy']
else:
#in CRR mode pass energy is KE / crr_ratio
self.current_auger_map *= self.h_settings['crr_ratio'] / (spec_dispersion * self.ke)
else:
self.spec_plot.setLabel('left','Intensity (Hz)')
# Spatial smoothing before analysis in either case
sigma_xy = self.settings['spatial_smooth_sigma'] # In terms of px?
if sigma_xy > 0.0:
print('spatial smoothing...')
self.current_auger_map = gaussian_filter(self.current_auger_map, (0,0,sigma_xy,sigma_xy,0))
if not self.settings['analysis_over_spectrum']:
self.perform_map_analysis()
# Update displays
self.update_spectrum_display()
self.on_change_ke_settings()
def update_spectrum_display(self):
if not self.data_loaded:
return
# Calculate the average spectrum over the image OR the ROI
if self.settings['spectrum_over_ROI']:
roi_auger_map = self.poly_roi.getArrayRegion(np.swapaxes(self.current_auger_map, 2, 3), self.im_auger, axes=(2,3))
roi_auger_masked = np.ma.array(roi_auger_map, mask = roi_auger_map == 0)
space_avg_spectra = roi_auger_masked.mean(axis=(1,2,3))
else:
space_avg_spectra = self.current_auger_map.mean(axis=(1,2,3))
# roi_slice, roi_tr = self.poly_roi.getArraySlice(self.auger_map, self.im_auger, axes=(3,2))
# print('ROI slice', roi_slice)
# print('Local Positions', self.poly_roi.getLocalHandlePositions())
# print('Scene Positions', self.poly_roi.getSceneHandlePositions())
#
#print(mapped_coords)
# compute and condition total spectrum
self.compute_total_spectrum(data = space_avg_spectra)
if self.settings['analysis_over_spectrum']:
self.perform_spectral_analysis()
# Display all spectra
num_chans = self.current_auger_map.shape[-1]
self.total_plotline.setData(self.ke_interp, self.total_spec)
for ii in range(num_chans):
self.chan_plotlines[ii].setData(self.ke[ii,:], space_avg_spectra[:,ii])
self.chan_plotlines[ii].setVisible(True)
if num_chans < 7:
for jj in range(num_chans,7):
self.chan_plotlines[jj].setVisible(False)
def perform_map_analysis(self):
# Smoothing (presently performed for individual detectors)
# FIX: MAY NEED A SUM MAP THAT ALIGNS DETECTOR CHANNELS AND SUMS
sigma_spec = self.settings['spectral_smooth_gauss_sigma'] # In terms of frames?
width_spec = self.settings['spectral_smooth_savgol_width'] # In terms of frames?
order_spec = self.settings['spectral_smooth_savgol_order']
if self.settings['spectral_smooth_type'] == 'Gaussian':
print('spectral smoothing...')
self.current_auger_map = gaussian_filter(self.current_auger_map, (sigma_spec,0,0,0,0))
elif self.settings['spectral_smooth_type'] == 'Savitzky-Golay':
            # Window length 1 + 2*width is always odd; polynomial order is set by spectral_smooth_savgol_order
print('spectral smoothing...')
self.current_auger_map = savgol_filter(self.current_auger_map, 1 + 2*width_spec, order_spec, axis=0)
# Background subtraction (implemented detector-wise currently)
# NOTE: INSUFFICIENT SPATIAL SMOOTHING MAY GIVE INACCURATE OR EVEN INF RESULTS
if not(self.settings['subtract_ke1'] == 'None'):
print('Performing background subtraction...')
for iDet in range(self.current_auger_map.shape[-1]):
# Fit a power law to the background
# get background range
ke_min = self.settings['ke1_start']
ke_max = self.settings['ke1_stop']
fit_map = (self.ke[iDet] > ke_min) * (self.ke[iDet] < ke_max)
ke_to_fit = self.ke[iDet,fit_map]
spec_to_fit = self.current_auger_map[fit_map,0,:,:,iDet].transpose(1,2,0)
if self.settings['subtract_ke1'] == 'Power Law':
# Fit power law
A, m = self.fit_powerlaw(ke_to_fit, spec_to_fit)
ke_mat = np.tile(self.ke[iDet], (spec_to_fit.shape[0],spec_to_fit.shape[1],1)).transpose(2,0,1)
A = np.tile(A, (self.ke.shape[1], 1, 1))
m = np.tile(m, (self.ke.shape[1], 1, 1))
bg = A * ke_mat**m
elif self.settings['subtract_ke1'] == 'Linear':
# Fit line
m, b = self.fit_line(ke_to_fit, spec_to_fit)
ke_mat = np.tile(self.ke[iDet], (spec_to_fit.shape[0],spec_to_fit.shape[1],1)).transpose(2,0,1)
m = np.tile(m, (self.ke.shape[1], 1, 1))
b = np.tile(b, (self.ke.shape[1], 1, 1))
bg = m * ke_mat + b
self.current_auger_map[:,0,:,:,iDet] -= bg
if self.settings['subtract_tougaard']:
R_loss = self.settings['R_loss']
E_loss = self.settings['E_loss']
dE = self.ke[0,1] - self.ke[0,0]
# Always use a kernel out to 3 * E_loss to ensure enough feature size
ke_kernel = np.arange(0, 3*E_loss, abs(dE))
if not np.mod(len(ke_kernel),2) == 0:
ke_kernel = np.arange(0, 3*E_loss+dE, abs(dE))
self.K_toug = (8.0/np.pi**2)*R_loss*E_loss**2 * ke_kernel / ((2.0*E_loss/np.pi)**2 + ke_kernel**2)**2
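            # Sanity check on the prefactor: with a = 2*E_loss/pi, the
            # continuous integral of k/((a**2 + k**2)**2) from 0 to inf is
            # 1/(2*a**2), so the kernel above integrates to exactly R_loss;
            # the renormalization below only corrects for truncation at
            # 3*E_loss and the finite step dE.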
            # Normalize the kernel so that its area is equal to R_loss
self.K_toug /= (np.sum(self.K_toug) * dE)/R_loss
self.current_auger_map -= dE * correlate1d(self.current_auger_map, self.K_toug,
mode='nearest', origin=-len(ke_kernel)//2, axis=0)
def perform_spectral_analysis(self):
# FIX: Consolidate with map analysis
# Performs same analysis functions as the map, but just on the single (1D) total spectrum
# Smoothing (presently performed for individual detectors)
sigma_spec = self.settings['spectral_smooth_gauss_sigma'] # In terms of frames?
width_spec = self.settings['spectral_smooth_savgol_width'] # In terms of frames?
order_spec = self.settings['spectral_smooth_savgol_order']
if self.settings['spectral_smooth_type'] == 'Gaussian':
print('spectral smoothing...')
self.total_spec = gaussian_filter(self.total_spec, sigma_spec)
elif self.settings['spectral_smooth_type'] == 'Savitzky-Golay':
            # Window length 1 + 2*width is always odd; polynomial order is set by spectral_smooth_savgol_order
print('spectral smoothing...')
self.total_spec = savgol_filter(self.total_spec, 1 + 2*width_spec, order_spec)
# Background subtraction (implemented detector-wise currently)
# NOTE: INSUFFICIENT SPATIAL SMOOTHING MAY GIVE INACCURATE OR EVEN INF RESULTS
if not(self.settings['subtract_ke1'] == 'None'):
print('Performing background subtraction...')
# Fit a power law to the background
# get background range
ke_min = self.settings['ke1_start']
ke_max = self.settings['ke1_stop']
fit_map = (self.ke_interp > ke_min) * (self.ke_interp < ke_max)
ke_to_fit = self.ke_interp[fit_map]
spec_to_fit = self.total_spec[fit_map]
if self.settings['subtract_ke1'] == 'Power Law':
# Fit power law
A, m = self.fit_powerlaw(ke_to_fit, spec_to_fit)
bg = A * self.ke_interp**m
elif self.settings['subtract_ke1'] == 'Linear':
# Fit line (there may be an easier way for 1D case)
m, b = self.fit_line(ke_to_fit, spec_to_fit)
bg = m * self.ke_interp + b
self.total_spec -= bg
if self.settings['subtract_tougaard']:
R_loss = self.settings['R_loss']
E_loss = self.settings['E_loss']
dE = self.ke_interp[1] - self.ke_interp[0]
# Always use a kernel out to 3 * E_loss to ensure enough feature size
ke_kernel = np.arange(0, 3*E_loss, abs(dE))
if not np.mod(len(ke_kernel),2) == 0:
ke_kernel = np.arange(0, 3*E_loss+dE, abs(dE))
self.K_toug = (8.0/np.pi**2)*R_loss*E_loss**2 * ke_kernel / ((2.0*E_loss/np.pi)**2 + ke_kernel**2)**2
            # Normalize the kernel so that its area is equal to R_loss
self.K_toug /= (np.sum(self.K_toug) * dE)/R_loss
self.total_spec -= dE * correlate1d(self.total_spec, self.K_toug,
mode='nearest', origin=-len(ke_kernel)//2, axis=0)
def fit_powerlaw(self, x, y):
# Takes x data (1d array) and y data (Nd array)
# last dimension of y array is interpolation dimension
# Returns coefficients A,m for the powerlaw y = Ax^m that provide least squares best fit
# Solved algebraically for speed
        # NOTE: This form goes to zero in the correct limits for either primary or secondary electron backgrounds, but not combined
        # e.g. secondary background has m < 0 such that y -> 0 as x -> inf
        # e.g. primary background has m > 0 such that y -> 0 as x -> 0
# First, must interpolate data to equally spaced points in log space
# Currently, interpolation is linear
f_interp = interpolate.interp1d(x, y)
x_lsp = 10**np.linspace(np.log10(x[0]), np.log10(x[-1]), len(x)) # log equally spaced x
x_lsp[0] = x[0] # This prevents 10^ log10 operation from moving x values slightly out of interpolation range
x_lsp[-1] = x[-1]
y_lsp = f_interp(x_lsp)
# Then, solve linear least squares matrix equations for A, m
# XB = y where X is a matrix based on x data, B is (log(A),m), and y is the y data
# In this case, equation is generated using y_i = 1*(log(A)) + x_i * m where x and y are log(x) and log(y)
# Generate X matrix (X_i = [1, x_i])
X = np.ones((len(x_lsp), 2))
X[:,1] = np.log10(x_lsp[:])
# Solve "normal equations" for B that minimizes least sq
# B = C*y = {((X^T)X)^(-1) * (X^T)} y
C = np.linalg.inv(X.T.dot(X)).dot(X.T)
if len(y.shape) < 2:
B = C.dot(np.log10(y_lsp))
else:
B = C.dot(np.log10(np.swapaxes(y_lsp, -1, 1)))
return 10**B[0], B[1]
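    # Quick check of fit_powerlaw on synthetic data (a sketch, not part of
    # the viewer; run in a console with ``v`` as an AugerSpecMapView):
    #
    #     x = np.linspace(10.0, 100.0, 50)
    #     y = 3.0 * x ** -1.5
    #     A, m = v.fit_powerlaw(x, y)   # expect A ~ 3.0, m ~ -1.5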
def fit_line(self, x, y):
# Takes x data (1d array) and y data (Nd array)
# last dimension of y array is fit dimension
# Solve linear least squares matrix equations for m, b of y = mx + b
# Returns m, b
# y_i = 1*b + x_i * m
# Generate X matrix (X_i = [1, x_i])
X = np.ones((len(x), 2))
X[:,1] = x[:]
# Solve "normal equations" for B that minimizes least sq
# B = C*y = {((X^T)X)^(-1) * (X^T)} y
C = np.linalg.inv(X.T.dot(X)).dot(X.T)
if len(y.shape) < 2:
B = C.dot(y)
else:
B = C.dot(np.swapaxes(y, -1, 1))
return B[1], B[0]
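    # Likewise for fit_line: with y = 2.0 * x + 1.0 the normal equations
    # return m ~ 2.0 and b ~ 1.0; for N-d y the fit runs along the last
    # axis and the returned coefficients keep the remaining axes.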
|
|
import re
import os
import shutil
import string
import logging
import subprocess
from subprocess import Popen, PIPE
from hashlib import sha1
from cStringIO import StringIO
from datetime import datetime
from glob import glob
import tg
import pysvn
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pymongo.errors import DuplicateKeyError
from pylons import c
from ming.base import Object
from ming.orm import Mapper, FieldProperty, session
from ming.utils import LazyProperty
from allura import model as M
from allura.lib import helpers as h
from allura.model.repository import GitLikeTree
from allura.model.auth import User
from allura.lib.utils import svn_path_exists
log = logging.getLogger(__name__)
class Repository(M.Repository):
tool_name='SVN'
repo_id='svn'
type_s='SVN Repository'
class __mongometa__:
name='svn-repository'
branches = FieldProperty([dict(name=str,object_id=str)])
@LazyProperty
def _impl(self):
return SVNImplementation(self)
def _log(self, rev, skip, max_count):
ci = self.commit(rev)
if ci is None: return []
return ci.log(int(skip), int(max_count))
def clone_command(self, category, username=''):
'''Return a string suitable for copy/paste that would clone this repo locally
category is one of 'ro' (read-only), 'rw' (read/write), or 'https' (read/write via https)
'''
if not username and c.user not in (None, User.anonymous()):
username = c.user.username
tpl = string.Template(tg.config.get('scm.clone.%s.%s' % (category, self.tool)) or
tg.config.get('scm.clone.%s' % self.tool))
return tpl.substitute(dict(username=username,
source_url=self.clone_url(category, username)+c.app.config.options.get('checkout_url'),
dest_path=self.suggested_clone_dest_path()))
def compute_diffs(self): return
def count(self, *args, **kwargs):
return super(Repository, self).count(None)
def log(self, branch=None, offset=0, limit=10):
return list(self._log(rev=branch, skip=offset, max_count=limit))
def latest(self, branch=None):
if self._impl is None: return None
if not self.heads: return None
return self._impl.commit(self.heads[0].object_id)
class SVNCalledProcessError(Exception):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command: '%s' returned non-zero exit status %s\nSTDOUT: %s\nSTDERR: %s" % \
(self.cmd, self.returncode, self.stdout, self.stderr)
class SVNImplementation(M.RepositoryImplementation):
post_receive_template = string.Template(
'#!/bin/bash\n'
'# The following is required for site integration, do not remove/modify.\n'
'# Place user hook code in post-commit-user and it will be called from here.\n'
'curl -s $url\n'
'\n'
'DIR="$$(dirname "$${BASH_SOURCE[0]}")"\n'
'if [ -x $$DIR/post-commit-user ]; then'
' exec $$DIR/post-commit-user "$$@"\n'
'fi')
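    # In the template above, ``$$`` is string.Template's escape for a
    # literal ``$`` in the generated bash, so only ``$url`` is substituted.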
def __init__(self, repo):
self._repo = repo
@LazyProperty
def _svn(self):
return pysvn.Client()
@LazyProperty
def _url(self):
return 'file://%s%s' % (self._repo.fs_path, self._repo.name)
def shorthand_for_commit(self, oid):
return '[r%d]' % self._revno(oid)
def url_for_commit(self, commit):
if isinstance(commit, basestring):
object_id = commit
else:
object_id = commit._id
return '%s%d/' % (
self._repo.url(), self._revno(object_id))
def init(self, default_dirs=True, skip_special_files=False):
fullname = self._setup_paths()
log.info('svn init %s', fullname)
if os.path.exists(fullname):
shutil.rmtree(fullname)
subprocess.call(['svnadmin', 'create', self._repo.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._repo.fs_path)
if not skip_special_files:
self._setup_special_files()
self._repo.status = 'ready'
# make first commit with dir structure
        if default_dirs:
            tmp = fullname + '/tmp'
            self._svn.checkout('file://' + fullname, tmp)
            os.mkdir(tmp + '/trunk')
            os.mkdir(tmp + '/tags')
            os.mkdir(tmp + '/branches')
            self._svn.add(tmp + '/trunk')
            self._svn.add(tmp + '/tags')
            self._svn.add(tmp + '/branches')
            self._svn.checkin([tmp + '/trunk', tmp + '/tags', tmp + '/branches'],
                              'Initial commit')
            shutil.rmtree(tmp)
def clone_from(self, source_url, copy_hooks=False):
'''Initialize a repo as a clone of another using svnsync'''
self.init(default_dirs=False, skip_special_files=True)
# Need a pre-revprop-change hook for cloning
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', 'pre-revprop-change')
with open(fn, 'wb') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0755)
def check_call(cmd):
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(input='p\n')
if p.returncode != 0:
self._repo.status = 'ready'
session(self._repo).flush(self._repo)
raise SVNCalledProcessError(cmd, p.returncode, stdout, stderr)
self._repo.status = 'importing'
session(self._repo).flush(self._repo)
log.info('Initialize %r as a clone of %s',
self._repo, source_url)
check_call(['svnsync', 'init', self._url, source_url])
check_call(['svnsync', '--non-interactive', 'sync', self._url])
log.info('... %r cloned', self._repo)
if not svn_path_exists("file://%s%s/%s" %
(self._repo.fs_path,
self._repo.name,
c.app.config.options['checkout_url'])):
c.app.config.options['checkout_url'] = ""
self._repo.refresh(notify=False)
self._setup_special_files(source_url, copy_hooks)
def refresh_heads(self):
info = self._svn.info2(
self._url,
revision=pysvn.Revision(pysvn.opt_revision_kind.head),
recurse=False)[0][1]
oid = self._oid(info.rev.number)
self._repo.heads = [ Object(name=None, object_id=oid) ]
# Branches and tags aren't really supported in subversion
self._repo.branches = []
self._repo.repo_tags = []
session(self._repo).flush(self._repo)
def commit(self, rev):
if rev in ('HEAD', None):
if not self._repo.heads: return None
oid = self._repo.heads[0].object_id
elif isinstance(rev, int) or rev.isdigit():
oid = self._oid(rev)
else:
oid = rev
result = M.repo.Commit.query.get(_id=oid)
if result is None: return None
result.set_context(self._repo)
return result
def all_commit_ids(self):
"""Return a list of commit ids, starting with the head (most recent
commit) and ending with the root (first commit).
"""
if not self._repo.heads:
return []
head_revno = self._revno(self._repo.heads[0].object_id)
return map(self._oid, range(head_revno, 0, -1))
def new_commits(self, all_commits=False):
head_revno = self._revno(self._repo.heads[0].object_id)
oids = [ self._oid(revno) for revno in range(1, head_revno+1) ]
if all_commits:
return oids
# Find max commit id -- everything greater than that will be "unknown"
prefix = self._oid('')
q = M.repo.Commit.query.find(
dict(
type='commit',
_id={'$gt':prefix},
),
dict(_id=True)
)
seen_oids = set()
for d in q.ming_cursor.cursor:
oid = d['_id']
if not oid.startswith(prefix): break
seen_oids.add(oid)
return [
oid for oid in oids if oid not in seen_oids ]
def refresh_commit_info(self, oid, seen_object_ids, lazy=True):
from allura.model.repo import CommitDoc, DiffInfoDoc
ci_doc = CommitDoc.m.get(_id=oid)
if ci_doc and lazy: return False
revno = self._revno(oid)
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty', oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
log_date = None
if hasattr(log_entry, 'date'):
log_date = datetime.utcfromtimestamp(log_entry.date)
user = Object(
name=log_entry.get('author', '--none--'),
email='',
date=log_date)
args = dict(
tree_id=None,
committed=user,
authored=user,
message=log_entry.get("message", "--none--"),
parent_ids=[],
child_ids=[])
if revno > 1:
args['parent_ids'] = [ self._oid(revno-1) ]
if ci_doc:
ci_doc.update(**args)
ci_doc.m.save()
else:
ci_doc = CommitDoc(dict(args, _id=oid))
try:
ci_doc.m.insert(safe=True)
except DuplicateKeyError:
if lazy: return False
# Save diff info
di = DiffInfoDoc.make(dict(_id=ci_doc._id, differences=[]))
for path in log_entry.changed_paths:
if path.action in ('A', 'M', 'R'):
try:
rhs_info = self._svn.info2(
self._url + h.really_unicode(path.path),
revision=self._revision(ci_doc._id),
recurse=False)[0][1]
rhs_id = self._obj_oid(ci_doc._id, rhs_info)
except pysvn.ClientError, e:
# pysvn will sometimes misreport deleted files (D) as
# something else (like A), causing info2() to raise a
# ClientError since the file doesn't exist in this
                    # revision. Set rhs_id = None to treat like a deleted file
log.info('This error was handled gracefully and logged '
'for informational purposes only:\n' + str(e))
rhs_id = None
else:
rhs_id = None
if ci_doc.parent_ids and path.action in ('D', 'M', 'R'):
try:
lhs_info = self._svn.info2(
self._url + h.really_unicode(path.path),
revision=self._revision(ci_doc.parent_ids[0]),
recurse=False)[0][1]
lhs_id = self._obj_oid(ci_doc._id, lhs_info)
except pysvn.ClientError, e:
# pysvn will sometimes report new files as 'M'odified,
# causing info2() to raise ClientError since the file
# doesn't exist in the parent revision. Set lhs_id = None
# to treat like a newly added file.
log.info('This error was handled gracefully and logged '
'for informational purposes only:\n' + str(e))
lhs_id = None
else:
lhs_id = None
di.differences.append(dict(
name=h.really_unicode(path.path),
lhs_id=lhs_id,
rhs_id=rhs_id))
di.m.save()
return True
def compute_tree_new(self, commit, tree_path='/'):
from allura.model import repo as RM
tree_path = tree_path[:-1]
tree_id = self._tree_oid(commit._id, tree_path)
tree, isnew = RM.Tree.upsert(tree_id)
if not isnew: return tree_id
log.debug('Computing tree for %s: %s',
self._revno(commit._id), tree_path)
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for %s: %s(%s)',
self._repo, commit, tree_path)
tree.delete()
return None
log.debug('Compute tree for %d paths', len(infos))
for path, info in infos[1:]:
last_commit_id = self._oid(info['last_changed_rev'].number)
last_commit = M.repo.Commit.query.get(_id=last_commit_id)
M.repo_refresh.set_last_commit(
self._repo._id,
re.sub(r'/?$', '/', tree_path), # force it to end with /
path,
self._tree_oid(commit._id, path),
M.repo_refresh.get_commit_info(last_commit))
if info.kind == pysvn.node_kind.dir:
tree.tree_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
elif info.kind == pysvn.node_kind.file:
tree.blob_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
else:
assert False
session(tree).flush(tree)
trees_doc = RM.TreesDoc.m.get(_id=commit._id)
if not trees_doc:
trees_doc = RM.TreesDoc(dict(
_id=commit._id,
tree_ids=[]))
trees_doc.tree_ids.append(tree_id)
trees_doc.m.save(safe=False)
return tree_id
def _tree_oid(self, commit_id, path):
data = 'tree\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _blob_oid(self, commit_id, path):
data = 'blob\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _obj_oid(self, commit_id, info):
path = info.URL[len(info.repos_root_URL):]
if info.kind == pysvn.node_kind.dir:
return self._tree_oid(commit_id, path)
else:
return self._blob_oid(commit_id, path)
def log(self, object_id, skip, count):
revno = self._revno(object_id)
result = []
while count and revno:
if skip == 0:
result.append(self._oid(revno))
count -= 1
else:
skip -= 1
revno -= 1
if revno:
return result, [ self._oid(revno) ]
else:
return result, []
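    # Example: with the head at r10, log(oid_of_r10, skip=0, count=3)
    # returns ([oids of r10, r9, r8], [oid_of_r7]); the second element is
    # the continuation point for the next page (empty list at the root).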
def open_blob(self, blob):
data = self._svn.cat(
self._url + blob.path(),
revision=self._revision(blob.commit._id))
return StringIO(data)
def blob_size(self, blob):
try:
data = self._svn.list(
self._url + blob.path(),
revision=self._revision(blob.commit._id),
dirent_fields=pysvn.SVN_DIRENT_SIZE)
except pysvn.ClientError:
log.info('ClientError getting filesize %r %r, returning 0', blob.path(), self._repo, exc_info=True)
return 0
try:
size = data[0][0]['size']
except (IndexError, KeyError):
log.info('Error getting filesize: bad data from svn client %r %r, returning 0', blob.path(), self._repo, exc_info=True)
size = 0
return size
def _copy_hooks(self, source_path):
'''Copy existing hooks if source path is given and exists.'''
if source_path is not None and source_path.startswith('file://'):
source_path = source_path[7:]
if source_path is None or not os.path.exists(source_path):
return
for hook in glob(os.path.join(source_path, 'hooks/*')):
filename = os.path.basename(hook)
target_filename = filename
if filename == 'post-commit':
target_filename = 'post-commit-user'
target = os.path.join(self._repo.full_fs_path, 'hooks', target_filename)
shutil.copy2(hook, target)
def _setup_hooks(self, source_path=None, copy_hooks=False):
'Set up the post-commit and pre-revprop-change hooks'
if copy_hooks:
self._copy_hooks(source_path)
# setup a post-commit hook to notify Allura of changes to the repo
# the hook should also call the user-defined post-commit-user hook
text = self.post_receive_template.substitute(
url=tg.config.get('base_url', 'http://localhost:8080')
+ '/auth/refresh_repo' + self._repo.url())
fn = os.path.join(self._repo.fs_path, self._repo.name, 'hooks', 'post-commit')
with open(fn, 'wb') as fp:
fp.write(text)
os.chmod(fn, 0755)
# create a blank pre-revprop-change file if one doesn't
# already exist to allow remote modification of revision
# properties (see http://svnbook.red-bean.com/en/1.1/ch05s02.html)
fn = os.path.join(self._repo.fs_path, self._repo.name, 'hooks', 'pre-revprop-change')
if not os.path.exists(fn):
with open(fn, 'wb') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0755)
def _revno(self, oid):
return int(oid.split(':')[1])
def _revision(self, oid):
return pysvn.Revision(
pysvn.opt_revision_kind.number,
self._revno(oid))
def _oid(self, revno):
return '%s:%s' % (self._repo._id, revno)
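    # Commit ids are synthetic strings: _oid(42) -> '<repo _id>:42', and
    # _revno/_revision parse the revision number back out of that string.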
Mapper.compile_all()
|
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
"""
Custom Application Main Menu:
The main menu consists of several sub-menus, each of which can
be customized separately as a method of this class. The overall
composition of the menu is defined in the menu() method, which can
be customized as well:
Function Sub-Menu Access to (standard)
menu_modules() the modules menu the Eden modules
menu_admin() the Admin menu System/User Administration
menu_lang() the Language menu Selection of the GUI locale
menu_auth() the User menu Login, Logout, User Profile
menu_help() the Help menu Contact page, About page
The standard uses the MM layout class for main menu items - but you
can of course use a custom layout class which you define in layouts.py.
Additional sub-menus can simply be defined as additional functions in
this class, and then be included in the menu() method.
Each sub-menu function returns a list of menu items, only the menu()
function must return a layout class instance.
"""
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
main_menu = MMO()(
# Align left
MM()(
# Home link
HM(),
# Modules
cls.menu_modules()
),
# Service menus, align-right
MM(right=True)(
cls.menu_admin(),
#cls.menu_gis()
cls.menu_lang(),
cls.menu_auth(),
cls.menu_help(),
),
)
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
return [
MM("News Feed", c="default", f="index", args="newsfeed",
icon="icon-updates"),
MM("Map", c="gis", f="index",
icon="icon-map"),
MM("Projects", c="project", f="project"),
MM("Requests", c="req", f="req", m="search")(
MM("Fulfill Requests", f="req"),
MM("Request Supplies", f="req", m="create", vars={"type": 1}),
MM("Request People", f="req", m="create", vars={"type": 3})
),
MM("Locations", c="org", f="facility")(
MM("Facilities", c="org", f="facility", m="search"),
MM("Create a Facility", c="org", f="facility", m="create")
),
MM("Contacts", c="hrm", f="staff")(
MM("Staff", c="hrm", f="staff"),
MM("Groups", c="hrm", f="group"),
MM("Organizations", c="org", f="organisation"),
MM("Networks", c="org", f="group"),
#MM("People Registry", c="pr", f="index")
),
MM("Resources", c="inv", f="index")(
MM("Assets", c="asset", f="asset", m="search"),
MM("Inventory", c="inv", f="inv_item", m="search"),
MM("Stock Counts", c="inv", f="adj"),
MM("Shipments", c="inv", f="send")
),
MM("Cases", c="assess", f="building", m="search")(
MM("Building Assessments", f="building", m="search"),
MM("Canvass", f="canvass"),
),
MM("Survey", c="survey")(
MM("Templates", f="template"),
MM("Assessments", f="series"),
MM("Import Templates", f="question_list", m="import"),
),
]
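    # A sketch of an additional sub-menu (hypothetical controller/function
    # names), defined here and then included in menu() as the class
    # docstring describes:
    #
    # @classmethod
    # def menu_reports(cls):
    #     return MM("Reports", c="report", f="index")(
    #         MM("Summary", f="summary"),
    #     )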
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Help Menu """
menu_help = MM("Help", c="default", f="help",
icon="icon-question-sign", **attr
)(
MM("Contact us", f="contact"),
MM("About", f="about")
)
# -------------------------------------------------------------------
# Now add the available guided tours to the help menu
        # (only when guided tours are enabled in the deployment settings)
if current.deployment_settings.get_base_guided_tour():
# load the guided tour configuration from the database
table = current.s3db.tour_config
logged_in = current.auth.is_logged_in()
if logged_in:
query = (table.deleted == False) &\
(table.role != "")
else:
query = (table.deleted == False) &\
(table.role == "")
tours = current.db(query).select(table.id,
table.name,
table.controller,
table.function,
table.role,
)
if len(tours) > 0:
menu_help.append(SEP())
for row in tours:
menu_help.append(MM(row.name,
c=row.controller,
f=row.function,
vars={"tour":row.id},
restrict=row.role
)
)
return menu_help
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
""" Auth Menu """
auth = current.auth
logged_in = auth.is_logged_in()
self_registration = current.deployment_settings.get_security_self_registration()
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
menu_auth = MM("Login", c="default", f="user", m="login",
icon="icon-signin",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration),
MM("Lost Password", m="retrieve_password")
)
else:
# Logged-in
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False, link=False, _id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout",
icon="icon-off"),
MM("Profile", c="default", f="person", m="update",
icon="icon-user"
),
MM("Change Password", m="change_password",
icon="icon-lock"
),
# @ToDo:
#SEP(),
#MM({"name": current.T("Rapid Data Entry"),
# "id": "rapid_toggle",
# "value": current.session.s3.rapid_data_entry is True},
# f="rapid"),
)
return menu_auth
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls, **attr):
""" Language Menu """
settings = current.deployment_settings
if not settings.get_L10n_display_toolbar():
return None
s3 = current.response.s3
languages = s3.l10n_languages
lang = s3.language
request = current.request
menu_lang = MM("Language", icon="icon-comment-alt", **attr)
for language in languages:
menu_lang.append(MM(languages[language], r=request,
translate=False,
vars={"_language": language},
ltr=True,
icon="icon-check" if language == lang else "icon-check-empty"
))
return menu_lang
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
"""
Custom Controller Menus
The options menu (left-hand options menu) is individual for each
controller, so each controller has its own options menu function
in this class.
Each of these option menu functions can be customized separately,
by simply overriding (re-defining) the default function. The
options menu function must return an instance of the item layout.
The standard menu uses the M item layout class, but you can of
course also use any other layout class which you define in
layouts.py (can also be mixed).
Make sure additional helper functions in this class don't match
any current or future controller prefix (e.g. by using an
underscore prefix).
"""
# -------------------------------------------------------------------------
def hrm(self):
""" HRM / Human Resources Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
manager_mode = lambda i: s3.hrm.mode is None
personal_mode = lambda i: s3.hrm.mode is not None
is_org_admin = lambda i: s3.hrm.orgs and True or \
ADMIN in s3.roles
settings = current.deployment_settings
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
return M(c="hrm")(
M(settings.get_hrm_staff_label(), f="staff",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Import", f="person", m="import",
vars={"group":"staff"}, p="create"),
),
M(teams, f="group",
check=[manager_mode, use_teams])(
M("New", m="create"),
M("List All"),
),
M("Department Catalog", f="department",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Job Title Catalog", f="job_title",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Skill Catalog", f="skill",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Provisions", f="skill_provision"),
),
M("Personal Profile", f="person",
check=personal_mode, vars=dict(mode="personal")),
# This provides the link to switch to the manager mode:
M("Staff Management", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
M("Personal Profile", f="person",
check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
def inv(self):
""" INV / Inventory """
ADMIN = current.session.s3.system_roles.ADMIN
#current.s3db.inv_recv_crud_strings()
#crud_strings = current.response.s3.crud_strings
#inv_recv_list = crud_strings.inv_recv.title_list
#inv_recv_search = crud_strings.inv_recv.title_search
return M()(
M("Facilities", c="inv", f="facility")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
M("Import", m="import")
),
M("Warehouse Stock", c="inv", f="inv_item")(
M("Search", f="inv_item", m="search"),
#M("Search Shipped Items", f="track_item", m="search"),
M("Stock Count", f="adj"),
#M("Kitting", f="kit"),
M("Import", f="inv_item", m="import", p="create"),
),
M("Reports", c="inv", f="inv_item")(
M("Warehouse Stock", f="inv_item",m="report"),
M("Expiration Report", c="inv", f="track_item",
m="search", vars=dict(report="exp")),
#M("Monetization Report", c="inv", f="inv_item",
# m="search", vars=dict(report="mon")),
#M("Utilization Report", c="inv", f="track_item",
# m="search", vars=dict(report="util")),
#M("Summary of Incoming Supplies", c="inv", f="track_item",
# m="search", vars=dict(report="inc")),
#M("Summary of Releases", c="inv", f="track_item",
# m="search", vars=dict(report="rel")),
),
#M(inv_recv_list, c="inv", f="recv")(
# M("New", m="create"),
# M("List All"),
# M("Search", m="search"),
#),
M("Sent Shipments", c="inv", f="send")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Search Shipped Items", f="track_item", m="search"),
),
M("Items", c="supply", f="item")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def org(self):
""" ORG / Organization Registry """
#ADMIN = current.session.s3.system_roles.ADMIN
return M(c="org")(
M("Facilities", f="facility")(
M("New", m="create"),
M("List All"),
#M("Review/Approve New", m="review"),
M("Map", m="map"),
M("Search", m="search"),
M("Import", m="import")
),
M("Organizations", f="organisation")(
M("New", m="create"),
M("List All"),
M("Import", m="import")
),
M("Facility Types", f="facility_type",
#restrict=[ADMIN]
)(
M("New", m="create"),
M("List All"),
),
M("Networks", f="group",
#restrict=[ADMIN]
)(
M("New", m="create"),
M("List All"),
),
M("Organization Types", f="organisation_type",
#restrict=[ADMIN]
)(
M("New", m="create"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def project(self):
""" PROJECT / Project Tracking & Management """
menu = M(c="project")(
M("Projects", f="project")(
M("New", m="create"),
M("List All"),
M("Import", m="import", p="create"),
),
)
return menu
# -------------------------------------------------------------------------
def req(self):
""" REQ / Request Management """
        db = current.db
        SUPER = lambda i: \
            db(db.auth_group.uuid=="super").select(db.auth_group.id,
                                                   limitby=(0, 1),
                                                   cache=current.s3db.cache
                                                   ).first().id
return M(c="req")(
M("Requests", f="req")(
M("Request Supplies", m="create", vars={"type": 1}),
M("Request People", m="create", vars={"type": 3}),
M("Fulfill Requests"),
#M("List All"),
M("List Recurring Requests", f="req_template"),
#M("Search", m="search"),
#M("Map", m="map"),
M("Report", m="report"),
M("FEMA Items Required", f="fema", m="search",
restrict=[SUPER]),
M("Search All Requested Items", f="req_item", m="search"),
M("Search All Requested Skills", f="req_skill", m="search"),
),
#M("Priority Items", f="summary_option")(
# M("New", m="create"),
# M("List All"),
#),
M("Commitments", f="commit")(
M("List All")
),
M("Sent Shipments", f="send")(
#M("New", m="create"),
M("List All"),
#M("Search Shipped Items", f="track_item", m="search"),
),
M("Items", c="supply", f="item",
restrict=[SUPER])(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[SUPER])(
M("New", m="create"),
M("List All"),
),
)
# END =========================================================================
|
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import testtools
from testtools.matchers import MatchesAny, Equals, GreaterThan
from nose.tools import nottest
from cloudify.workflows import local
from cloudify.decorators import operation
class TestExecuteOperationWorkflow(testtools.TestCase):
def setUp(self):
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resources/blueprints/execute_operation.yaml")
self.env = local.init_env(blueprint_path)
super(TestExecuteOperationWorkflow, self).setUp()
def test_execute_operation(self):
params = self._get_params()
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
def test_execute_operation_default_values(self):
params = {'operation': 'cloudify.interfaces.lifecycle.create'}
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
def test_execute_operation_with_operation_parameters(self):
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.create')
def test_execute_operation_with_op_params_and_kwargs_override_allowed(
self):
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.configure', True)
def test_execute_operation_with_op_params_and_kwargs_override_disallowed(
self):
self._test_exec_op_with_params_and_no_kwargs_override(False)
def test_execute_operation_with_op_params_and_default_kwargs_override(
self):
# testing kwargs override with the default value for the
# 'allow_kwargs_override' parameter (null/None)
self._test_exec_op_with_params_and_no_kwargs_override(None)
def _test_exec_op_with_params_and_no_kwargs_override(self, kw_over_val):
try:
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.configure', kw_over_val)
self.fail('expected kwargs override to be disallowed')
except RuntimeError, e:
self.assertIn(
'To allow redefinition, pass "allow_kwargs_override"', str(e))
def _test_execute_operation_with_op_params(self, op,
allow_kw_override=None):
operation_param_key = 'operation_param_key'
operation_param_value = 'operation_param_value'
op_params = {operation_param_key: operation_param_value}
params = self._get_params(op=op, op_params=op_params,
allow_kw_override=allow_kw_override)
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
instances = self.env.storage.get_node_instances()
for instance in instances:
self.assertIn('op_kwargs', instance.runtime_properties)
op_kwargs = instance.runtime_properties['op_kwargs']
self.assertIn(operation_param_key, op_kwargs)
self.assertEquals(operation_param_value,
op_kwargs[operation_param_key])
def test_execute_operation_by_nodes(self):
node_ids = ['node2', 'node3']
params = self._get_params(node_ids=node_ids)
self.env.execute('execute_operation', params)
self._make_filter_assertions(3, node_ids=node_ids)
def test_execute_operation_by_node_instances(self):
instances = self.env.storage.get_node_instances()
node_instance_ids = [instances[0].id, instances[3].id]
params = self._get_params(node_instance_ids=node_instance_ids)
self.env.execute('execute_operation', params)
self._make_filter_assertions(2, node_instance_ids=node_instance_ids)
def test_execute_operation_by_type_names(self):
type_names = ['mock_type2']
params = self._get_params(type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(3, type_names=type_names)
def test_execute_operation_by_nodes_and_types(self):
node_ids = ['node1', 'node2']
type_names = ['mock_type2']
params = self._get_params(node_ids=node_ids, type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(2, node_ids=node_ids,
type_names=type_names)
def test_execute_operation_by_nodes_types_and_node_instances(self):
node_ids = ['node2', 'node3']
type_names = ['mock_type2', 'mock_type1']
instances = self.env.storage.get_node_instances()
node_instance_ids = [next(inst.id for inst in instances if
inst.node_id == 'node2')]
params = self._get_params(node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(1, node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=type_names)
def test_execute_operation_empty_intersection(self):
node_ids = ['node1', 'node2']
type_names = ['mock_type3']
params = self._get_params(node_ids=node_ids, type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(0, node_ids=node_ids,
type_names=type_names)
def test_execute_operation_with_dependency_order(self):
time_diff_assertions_pairs = [
(0, 1), # node 1 instance and node 2 instance
(0, 2), # node 1 instance and node 2 instance
(1, 3), # node 2 instance and node 3 instance
(2, 3) # node 2 instance and node 3 instance
]
self._dep_order_tests_helper([],
['node1', 'node2', 'node2', 'node3'],
time_diff_assertions_pairs)
def test_execute_operation_with_indirect_dependency_order(self):
time_diff_assertions_pairs = [
(0, 1), # node 1 instance and node 3 instance
]
self._dep_order_tests_helper(['node1', 'node3'],
['node1', 'node3'],
time_diff_assertions_pairs)
def _make_filter_assertions(self, expected_num_of_visited_instances,
node_ids=None, node_instance_ids=None,
type_names=None):
num_of_visited_instances = 0
instances = self.env.storage.get_node_instances()
nodes_by_id = dict((node.id, node) for node in
self.env.storage.get_nodes())
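        # an instance must have been visited iff it matches every filter
        # that was actually provided (node ids, node instance ids, and a
        # type-hierarchy match against type names)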
for inst in instances:
test_op_visited = inst.runtime_properties.get('test_op_visited')
if (not node_ids or inst.node_id in node_ids) \
and \
(not node_instance_ids or inst.id in node_instance_ids) \
and \
(not type_names or (next((type for type in nodes_by_id[
inst.node_id].type_hierarchy if type in type_names),
None))):
self.assertTrue(test_op_visited)
num_of_visited_instances += 1
else:
self.assertIsNone(test_op_visited)
# this is actually an assertion to ensure the tests themselves are ok
self.assertEquals(expected_num_of_visited_instances,
num_of_visited_instances)
def _dep_order_tests_helper(self, node_ids_param,
ordered_node_ids_of_instances,
indices_pairs_for_time_diff_assertions):
params = self._get_params(
op='cloudify.interfaces.lifecycle.start',
node_ids=node_ids_param,
run_by_dep=True)
self.env.execute('execute_operation', params, task_thread_pool_size=4)
instances_and_visit_times = sorted(
((inst, inst.runtime_properties['visit_time']) for inst in
self.env.storage.get_node_instances() if 'visit_time' in
inst.runtime_properties),
key=lambda inst_and_time: inst_and_time[1])
self.assertEqual(ordered_node_ids_of_instances,
[inst_and_time[0].node_id for inst_and_time in
instances_and_visit_times])
# asserting time difference between the operation execution for the
# different nodes. this way if something breaks and the tasks aren't
# dependent on one another, there's a better chance we'll catch
# it, since even if the order of the visits happens to be correct,
# it's less likely there'll be a significant time difference between
# the visits
def assert_time_difference(earlier_inst_index, later_inst_index):
td = instances_and_visit_times[later_inst_index][1] - \
instances_and_visit_times[earlier_inst_index][1]
self.assertThat(td, MatchesAny(Equals(1), GreaterThan(1)))
for index1, index2 in indices_pairs_for_time_diff_assertions:
assert_time_difference(index1, index2)
def _get_params(self, op='cloudify.interfaces.lifecycle.create',
op_params=None, run_by_dep=False,
allow_kw_override=None, node_ids=None,
node_instance_ids=None, type_names=None):
return {
'operation': op,
'operation_kwargs': op_params or {},
'run_by_dependency_order': run_by_dep,
'allow_kwargs_override': allow_kw_override,
'node_ids': node_ids or [],
'node_instance_ids': node_instance_ids or [],
'type_names': type_names or []
}
class TestScale(testtools.TestCase):
def setUp(self):
super(TestScale, self).setUp()
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resources/blueprints/test-scale-blueprint.yaml")
self.env = local.init_env(blueprint_path)
def test_no_node(self):
with testtools.ExpectedException(ValueError, ".*mock doesn't exist.*"):
self.env.execute('scale', parameters={'node_id': 'mock'})
def test_zero_delta(self):
# should simply work
self.env.execute('scale', parameters={'node_id': 'node',
'delta': 0})
def test_illegal_delta(self):
with testtools.ExpectedException(ValueError, ".*-1 is illegal.*"):
self.env.execute('scale', parameters={'node_id': 'node',
'delta': -1})
@nottest
@operation
def exec_op_test_operation(ctx, **kwargs):
ctx.instance.runtime_properties['test_op_visited'] = True
if kwargs:
ctx.instance.runtime_properties['op_kwargs'] = kwargs
@nottest
@operation
def exec_op_dependency_order_test_operation(ctx, **kwargs):
ctx.instance.runtime_properties['visit_time'] = time.time()
time.sleep(1)
|
|
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the BSD License.
from __future__ import with_statement
import sys
import os
import unittest
from os.path import dirname
import subprocess
import shutil
import zipfile
import threading
import tempfile
import urllib2
import hashlib
import tarfile
import time
from contextlib import contextmanager
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from distutils.core import setup as dist_setup
from distutils import dir_util
import esky
import esky.patch
import esky.sudo
from esky import bdist_esky
from esky.bdist_esky import Executable
from esky.util import extract_zipfile, deep_extract_zipfile, get_platform, \
ESKY_CONTROL_DIR, files_differ, ESKY_APPDATA_DIR, \
really_rmtree
from esky.fstransact import FSTransaction
try:
import py2exe
except ImportError:
py2exe = None
try:
import py2app
except ImportError:
py2app = None
try:
import bbfreeze
except ImportError:
bbfreeze = None
try:
import cx_Freeze
except ImportError:
cx_Freeze = None
try:
import pypy
except ImportError:
pypy = None
sys.path.append(os.path.dirname(__file__))
def assert_freezedir_exists(dist):
assert os.path.exists(dist.freeze_dir)
if not hasattr(HTTPServer,"shutdown"):
import socket
def socketserver_shutdown(self):
try:
self.socket.close()
except socket.error:
pass
HTTPServer.shutdown = socketserver_shutdown
@contextmanager
def setenv(key,value):
oldval = os.environ.get(key,None)
os.environ[key] = value
yield
if oldval is not None:
os.environ[key] = oldval
else:
del os.environ[key]
class TestEsky(unittest.TestCase):
if py2exe is not None:
def test_esky_py2exe(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe"}})
def test_esky_py2exe_bundle1(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 1}}})
def test_esky_py2exe_bundle2(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 2}}})
def test_esky_py2exe_bundle3(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 3}}})
def test_esky_py2exe_skiparchive(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"skip_archive": True}}})
def test_esky_py2exe_unbuffered(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"unbuffered": True}}})
def test_esky_py2exe_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = "_chainload = _orig_chainload\nbootstrap()"
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_py2exe_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe"}})
if pypy is not None:
def test_esky_py2exe_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"compile_bootstrap_exes":1}})
def test_esky_py2exe_unbuffered_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"compile_bootstrap_exes":1,
"freezer_options": {
"unbuffered": True}}})
if py2app is not None:
def test_esky_py2app(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app"}})
if esky.sudo.can_get_root():
def test_esky_py2app_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app"}})
if pypy is not None:
def test_esky_py2app_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app",
"compile_bootstrap_exes":1}})
if bbfreeze is not None:
def test_esky_bbfreeze(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze"}})
if sys.platform == "win32":
def test_esky_bbfreeze_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = "_chainload = _orig_chainload\nbootstrap()"
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_bbfreeze_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze"}})
if pypy is not None:
def test_esky_bbfreeze_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze",
"compile_bootstrap_exes":1}})
if cx_Freeze is not None:
def test_esky_cxfreeze(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze"}})
if sys.platform == "win32":
def test_esky_cxfreeze_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = ["_chainload = _orig_chainload",None]
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_cxfreeze_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze"}})
if pypy is not None:
def test_esky_cxfreeze_pypy(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze",
"compile_bootstrap_exes":1}})
def _run_eskytester(self,options):
"""Build and run the eskytester app using the given distutils options.
The "eskytester" application can be found next to this file, and the
sequence of tests performed range across "script1.py" to "script3.py".
"""
olddir = os.path.abspath(os.curdir)
# tdir = os.path.join(os.path.dirname(__file__),"DIST")
# if os.path.exists(tdir):
# really_rmtree(tdir)
# os.mkdir(tdir)
tdir = tempfile.mkdtemp()
server = None
script2 = None
try:
options.setdefault("build",{})["build_base"] = os.path.join(tdir,"build")
options.setdefault("bdist",{})["dist_dir"] = os.path.join(tdir,"dist")
# Set some callbacks to test that they work correctly
options.setdefault("bdist_esky",{}).setdefault("pre_freeze_callback","esky.tests.test_esky.assert_freezedir_exists")
options.setdefault("bdist_esky",{}).setdefault("pre_zip_callback",assert_freezedir_exists)
platform = get_platform()
deploydir = "deploy.%s" % (platform,)
esky_root = dirname(dirname(dirname(__file__)))
os.chdir(tdir)
shutil.copytree(os.path.join(esky_root,"esky","tests","eskytester"),"eskytester")
dir_util._path_created.clear()
# Build three increasing versions of the test package.
# Version 0.2 will include a bundled MSVCRT on win32.
# Version 0.3 will be distributed as a patch.
metadata = dict(name="eskytester",packages=["eskytester"],author="rfk",
description="the esky test package",
data_files=[("data",["eskytester/datafile.txt"])],
package_data={"eskytester":["pkgdata.txt"]},)
options2 = options.copy()
options2["bdist_esky"] = options["bdist_esky"].copy()
options2["bdist_esky"]["bundle_msvcrt"] = True
script1 = "eskytester/script1.py"
script2 = Executable([None,open("eskytester/script2.py")],name="script2")
script3 = "eskytester/script3.py"
dist_setup(version="0.1",scripts=[script1],options=options,script_args=["bdist_esky"],**metadata)
dist_setup(version="0.2",scripts=[script1,script2],options=options2,script_args=["bdist_esky"],**metadata)
dist_setup(version="0.3",scripts=[script2,script3],options=options,script_args=["bdist_esky_patch"],**metadata)
os.unlink(os.path.join(tdir,"dist","eskytester-0.3.%s.zip"%(platform,)))
# Check that the patches apply cleanly
uzdir = os.path.join(tdir,"unzip")
deep_extract_zipfile(os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,)),uzdir)
with open(os.path.join(tdir,"dist","eskytester-0.3.%s.from-0.1.patch"%(platform,)),"rb") as f:
esky.patch.apply_patch(uzdir,f)
really_rmtree(uzdir)
deep_extract_zipfile(os.path.join(tdir,"dist","eskytester-0.2.%s.zip"%(platform,)),uzdir)
with open(os.path.join(tdir,"dist","eskytester-0.3.%s.from-0.2.patch"%(platform,)),"rb") as f:
esky.patch.apply_patch(uzdir,f)
really_rmtree(uzdir)
# Serve the updates at http://localhost:8000/dist/
print "running local update server"
server = HTTPServer(("localhost",8000),SimpleHTTPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Set up the deployed esky environment for the initial version
zfname = os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,))
os.mkdir(deploydir)
extract_zipfile(zfname,deploydir)
# Run the scripts in order.
if options["bdist_esky"]["freezer_module"] == "py2app":
appdir = os.path.join(deploydir,os.listdir(deploydir)[0])
cmd1 = os.path.join(appdir,"Contents","MacOS","script1")
cmd2 = os.path.join(appdir,"Contents","MacOS","script2")
cmd3 = os.path.join(appdir,"Contents","MacOS","script3")
else:
appdir = deploydir
if sys.platform == "win32":
cmd1 = os.path.join(deploydir,"script1.exe")
cmd2 = os.path.join(deploydir,"script2.exe")
cmd3 = os.path.join(deploydir,"script3.exe")
else:
cmd1 = os.path.join(deploydir,"script1")
cmd2 = os.path.join(deploydir,"script2")
cmd3 = os.path.join(deploydir,"script3")
print "spawning eskytester script1", options["bdist_esky"]["freezer_module"]
os.unlink(os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,)))
p = subprocess.Popen(cmd1)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
print "spawning eskytester script2"
os.unlink(os.path.join(tdir,"dist","eskytester-0.2.%s.zip"%(platform,)))
p = subprocess.Popen(cmd2)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
print "spawning eskytester script3"
p = subprocess.Popen(cmd3)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
finally:
if script2:
script2.script[1].close()
os.chdir(olddir)
if sys.platform == "win32":
                # wait for the cleanup-at-exit process to finish
time.sleep(4)
really_rmtree(tdir)
if server:
server.shutdown()
def test_esky_locking(self):
"""Test that locking an Esky works correctly."""
platform = get_platform()
appdir = tempfile.mkdtemp()
try:
vdir = os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1.%s" % (platform,))
os.makedirs(vdir)
os.mkdir(os.path.join(vdir,ESKY_CONTROL_DIR))
open(os.path.join(vdir,ESKY_CONTROL_DIR,"bootstrap-manifest.txt"),"wb").close()
e1 = esky.Esky(appdir,"http://example.com/downloads/")
assert e1.name == "testapp"
assert e1.version == "0.1"
assert e1.platform == platform
e2 = esky.Esky(appdir,"http://example.com/downloads/")
assert e2.name == "testapp"
assert e2.version == "0.1"
assert e2.platform == platform
locked = []; errors = [];
trigger1 = threading.Event(); trigger2 = threading.Event()
def runit(e,t1,t2):
def runme():
try:
e.lock()
except Exception, err:
errors.append(err)
else:
locked.append(e)
t1.set()
t2.wait()
return runme
t1 = threading.Thread(target=runit(e1,trigger1,trigger2))
t2 = threading.Thread(target=runit(e2,trigger2,trigger1))
t1.start()
t2.start()
t1.join()
t2.join()
assert len(locked) == 1
assert (e1 in locked or e2 in locked)
assert len(errors) == 1
assert isinstance(errors[0],esky.EskyLockedError)
finally:
really_rmtree(appdir)
def test_esky_lock_breaking(self):
"""Test that breaking the lock on an Esky works correctly."""
appdir = tempfile.mkdtemp()
try:
os.makedirs(os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1",ESKY_CONTROL_DIR))
open(os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1",ESKY_CONTROL_DIR,"bootstrap-manifest.txt"),"wb").close()
e1 = esky.Esky(appdir,"http://example.com/downloads/")
e2 = esky.Esky(appdir,"http://example.com/downloads/")
trigger1 = threading.Event(); trigger2 = threading.Event()
errors = []
def run1():
try:
e1.lock()
except Exception, err:
errors.append(err)
trigger1.set()
trigger2.wait()
def run2():
trigger1.wait()
try:
e2.lock()
except esky.EskyLockedError:
pass
except Exception, err:
errors.append(err)
else:
errors.append("locked when I shouldn't have")
e2.lock_timeout = 0.1
time.sleep(0.5)
try:
e2.lock()
except Exception, err:
errors.append(err)
trigger2.set()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
assert len(errors) == 0, str(errors)
finally:
really_rmtree(appdir)
def test_README(self):
"""Ensure that the README is in sync with the docstring.
This test should always pass; if the README is out of sync it just updates
it with the contents of esky.__doc__.
"""
dirname = os.path.dirname
readme = os.path.join(dirname(dirname(dirname(__file__))),"README.rst")
if not os.path.isfile(readme):
f = open(readme,"wb")
f.write(esky.__doc__.encode())
f.close()
else:
f = open(readme,"rb")
if f.read() != esky.__doc__:
f.close()
f = open(readme,"wb")
f.write(esky.__doc__.encode())
f.close()
class TestFSTransact(unittest.TestCase):
"""Testcases for FSTransact."""
def setUp(self):
self.testdir = tempfile.mkdtemp()
def tearDown(self):
really_rmtree(self.testdir)
def path(self,path):
return os.path.join(self.testdir,path)
def setContents(self,path,contents=""):
if not os.path.isdir(os.path.dirname(self.path(path))):
os.makedirs(os.path.dirname(self.path(path)))
with open(self.path(path),"wb") as f:
f.write(contents.encode())
def assertContents(self,path,contents):
with open(self.path(path),"rb") as f:
self.assertEquals(f.read().decode(),contents)
def test_no_move_outside_root(self):
self.setContents("file1","hello world")
trn = FSTransaction(self.testdir)
trn.move(self.path("file1"),"file2")
trn.commit()
self.assertContents("file2","hello world")
trn = FSTransaction(self.testdir)
self.assertRaises(ValueError,trn.move,self.path("file2"),"../file1")
trn.abort()
def test_move_file(self):
self.setContents("file1","hello world")
trn = FSTransaction()
trn.move(self.path("file1"),self.path("file2"))
self.assertContents("file1","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file2","hello world")
self.assertFalse(os.path.exists(self.path("file1")))
def test_move_file_with_unicode_name(self):
self.setContents(u"file\N{SNOWMAN}","hello world")
trn = FSTransaction()
trn.move(self.path(u"file\N{SNOWMAN}"),self.path("file2"))
self.assertContents(u"file\N{SNOWMAN}","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file2","hello world")
self.assertFalse(os.path.exists(self.path(u"file\N{SNOWMAN}")))
def test_copy_file(self):
self.setContents("file1","hello world")
trn = FSTransaction()
trn.copy(self.path("file1"),self.path("file2"))
self.assertContents("file1","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file1","hello world")
self.assertContents("file2","hello world")
def test_move_dir(self):
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
self.assertFalse(os.path.exists(self.path("dir2")))
trn.commit()
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir(self):
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
self.assertFalse(os.path.exists(self.path("dir2")))
trn.commit()
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertContents("dir1/file1","hello world")
self.assertContents("dir1/file2","how are you?")
self.assertContents("dir1/subdir/file3","fine thanks")
def test_remove(self):
self.setContents("dir1/file1","hello there world")
trn = FSTransaction()
trn.remove(self.path("dir1/file1"))
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1/file1")))
self.assertTrue(os.path.exists(self.path("dir1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1")))
def test_remove_abort(self):
self.setContents("dir1/file1","hello there world")
trn = FSTransaction()
trn.remove(self.path("dir1/file1"))
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn.abort()
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.abort()
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1")))
def test_move_dir_exists(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
self.setContents("dir2/file1","different contents")
self.setContents("dir2/file3","a different file")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertFalse(os.path.exists(self.path("dir2/file3")))
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir_exists(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
self.setContents("dir2/file1","different contents")
self.setContents("dir2/file3","a different file")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertFalse(os.path.exists(self.path("dir2/file3")))
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertContents("dir1/file0","zero zero zero")
self.assertContents("dir1/file1","hello world")
self.assertContents("dir1/file2","how are you?")
self.assertContents("dir1/subdir/file3","fine thanks")
def test_move_dir_over_file(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir2","actually a file")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file0","zero zero zero")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir_over_file(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir2","actually a file")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file0","zero zero zero")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir1/file0","zero zero zero")
def test_move_file_over_dir(self):
self.setContents("file0","zero zero zero")
self.setContents("dir2/myfile","hahahahaha!")
trn = FSTransaction()
trn.move(self.path("file0"),self.path("dir2"))
self.assertContents("file0","zero zero zero")
self.assertContents("dir2/myfile","hahahahaha!")
trn.commit()
self.assertContents("dir2","zero zero zero")
self.assertFalse(os.path.exists(self.path("file0")))
def test_copy_file_over_dir(self):
self.setContents("file0","zero zero zero")
self.setContents("dir2/myfile","hahahahaha!")
trn = FSTransaction()
trn.copy(self.path("file0"),self.path("dir2"))
self.assertContents("file0","zero zero zero")
self.assertContents("dir2/myfile","hahahahaha!")
trn.commit()
self.assertContents("dir2","zero zero zero")
self.assertContents("file0","zero zero zero")
class TestPatch(unittest.TestCase):
"""Testcases for esky.patch."""
_TEST_FILES = (
("pyenchant-1.2.0.tar.gz","2fefef0868b110b1da7de89c08344dd2"),
("pyenchant-1.5.2.tar.gz","fa1e4f3f3c473edd98c7bb0e46eea352"),
("pyenchant-1.6.0.tar.gz","3fd7336989764d8d379a367236518439"),
)
_TEST_FILES_URL = "http://pypi.python.org/packages/source/p/pyenchant/"
def setUp(self):
self.tests_root = dirname(__file__)
platform = get_platform()
self.tfdir = tfdir = os.path.join(self.tests_root,"patch-test-files")
self.workdir = workdir = os.path.join(self.tests_root,"patch-test-temp."+platform)
if not os.path.isdir(tfdir):
os.makedirs(tfdir)
if not os.path.isdir(workdir):
os.makedirs(workdir)
# Ensure we have the expected test files.
# Download from PyPI if necessary.
for (tfname,hash) in self._TEST_FILES:
tfpath = os.path.join(tfdir,tfname)
if not os.path.exists(tfpath):
data = urllib2.urlopen(self._TEST_FILES_URL+tfname).read()
assert hashlib.md5(data).hexdigest() == hash
with open(tfpath,"wb") as f:
f.write(data)
def tearDown(self):
really_rmtree(self.workdir)
def test_patch_bigfile(self):
tdir = tempfile.mkdtemp()
try:
data = [os.urandom(100)*10 for i in xrange(6)]
for nm in ("source","target"):
with open(os.path.join(tdir,nm),"wb") as f:
for i in xrange(1000):
for chunk in data:
f.write(chunk)
data[2],data[3] = data[3],data[2]
with open(os.path.join(tdir,"patch"),"wb") as f:
esky.patch.write_patch(os.path.join(tdir,"source"),os.path.join(tdir,"target"),f)
dgst1 = esky.patch.calculate_digest(os.path.join(tdir,"target"))
dgst2 = esky.patch.calculate_digest(os.path.join(tdir,"source"))
self.assertNotEquals(dgst1,dgst2)
with open(os.path.join(tdir,"patch"),"rb") as f:
esky.patch.apply_patch(os.path.join(tdir,"source"),f)
dgst3 = esky.patch.calculate_digest(os.path.join(tdir,"source"))
self.assertEquals(dgst1,dgst3)
finally:
really_rmtree(tdir)
def test_diffing_back_and_forth(self):
for (tf1,_) in self._TEST_FILES:
for (tf2,_) in self._TEST_FILES:
path1 = self._extract(tf1,"source")
path2 = self._extract(tf2,"target")
with open(os.path.join(self.workdir,"patch"),"wb") as f:
esky.patch.write_patch(path1,path2,f)
if tf1 != tf2:
self.assertNotEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
with open(os.path.join(self.workdir,"patch"),"rb") as f:
esky.patch.apply_patch(path1,f)
self.assertEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
def test_apply_patch(self):
path1 = self._extract("pyenchant-1.2.0.tar.gz","source")
path2 = self._extract("pyenchant-1.6.0.tar.gz","target")
path1 = os.path.join(path1,"pyenchant-1.2.0")
path2 = os.path.join(path2,"pyenchant-1.6.0")
pf = os.path.join(self.tfdir,"v1.2.0_to_v1.6.0.patch")
if not os.path.exists(pf):
pf = os.path.join(dirname(esky.__file__),"tests","patch-test-files","v1.2.0_to_v1.6.0.patch")
with open(pf,"rb") as f:
esky.patch.apply_patch(path1,f)
self.assertEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
def test_copying_multiple_targets_from_a_single_sibling(self):
        join = os.path.join
        src_dir = join(self.workdir, "source")
        tgt_dir = join(self.workdir, "target")
for dirnm in src_dir, tgt_dir:
os.mkdir(dirnm)
zf = zipfile.ZipFile(join(self.tfdir, "movefrom-source.zip"), "r")
zf.extractall(src_dir)
zf = zipfile.ZipFile(join(self.tfdir, "movefrom-target.zip"), "r")
zf.extractall(tgt_dir)
        # The two directory structures should initially be different.
self.assertNotEquals(esky.patch.calculate_digest(src_dir),
esky.patch.calculate_digest(tgt_dir))
# Create patch from source to target.
patch_fname = join(self.workdir, "patch")
with open(patch_fname, "wb") as patchfile:
esky.patch.write_patch(src_dir, tgt_dir, patchfile)
# Try to apply the patch.
with open(patch_fname, "rb") as patchfile:
esky.patch.apply_patch(src_dir, patchfile)
# Then the two directory structures should be equal.
self.assertEquals(esky.patch.calculate_digest(src_dir),
esky.patch.calculate_digest(tgt_dir))
def _extract(self,filename,dest):
dest = os.path.join(self.workdir,dest)
if os.path.exists(dest):
really_rmtree(dest)
f = tarfile.open(os.path.join(self.tfdir,filename),"r:gz")
try:
f.extractall(dest)
finally:
f.close()
return dest
class TestPatch_cxbsdiff(TestPatch):
"""Test the patching code with cx-bsdiff rather than bsdiff4."""
def setUp(self):
self.__orig_bsdiff4 = esky.patch.bsdiff4
if esky.patch.bsdiff4_cx is not None:
esky.patch.bsdiff4 = esky.patch.bsdiff4_cx
return super(TestPatch_cxbsdiff,self).setUp()
def tearDown(self):
esky.patch.bsdiff4 = self.__orig_bsdiff4
return super(TestPatch_cxbsdiff,self).tearDown()
class TestPatch_pybsdiff(TestPatch):
"""Test the patching code with pure-python bsdiff4."""
def setUp(self):
self.__orig_bsdiff4 = esky.patch.bsdiff4
esky.patch.bsdiff4 = esky.patch.bsdiff4_py
return super(TestPatch_pybsdiff,self).setUp()
def tearDown(self):
esky.patch.bsdiff4 = self.__orig_bsdiff4
return super(TestPatch_pybsdiff,self).tearDown()
class TestFilesDiffer(unittest.TestCase):
def setUp(self):
self.tdir = tempfile.mkdtemp()
def _path(self,*names):
return os.path.join(self.tdir,*names)
def _differs(self,data1,data2,start=0,stop=None):
with open(self._path("file1"),"wb") as f:
f.write(data1.encode("ascii"))
with open(self._path("file2"),"wb") as f:
f.write(data2.encode("ascii"))
return files_differ(self._path("file1"),self._path("file2"),start,stop)
def test_files_differ(self):
assert self._differs("one","two")
assert self._differs("onethreetwo","twothreeone")
assert self._differs("onethreetwo","twothreeone",3)
assert not self._differs("onethreetwo","twothreeone",3,-3)
assert self._differs("onethreetwo","twothreeone",2,-3)
assert self._differs("onethreetwo","twothreeone",3,-2)
def tearDown(self):
really_rmtree(self.tdir)
|
|
import os
import re
import shutil
import urllib2
import templates
import setuptools
import subprocess
import zc.recipe.egg
from zc.buildout import UserError
from dist_installer import install
class Installer:
def __init__(self, options, buildout, log, name):
self.options = options
self.buildout = buildout
self.log = log
self.name = name
def get_extra_paths(self):
basic_dir = os.path.join(self.buildout['buildout']['directory'], 'src')
local_apps_dir = os.path.join(basic_dir, self.options['project'], self.options['local-apps'])
external_apps_dir = os.path.join(basic_dir, self.options['project'], self.options['external-apps'])
extra_paths = [self.options['location'],
self.buildout['buildout']['directory'],
local_apps_dir, external_apps_dir]
pythonpath = [p.replace('/', os.path.sep) for p in
self.options['libs-path'].splitlines() if p.strip()]
extra_paths.extend(pythonpath)
return extra_paths
def command(self, cmd, **kwargs):
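        # run the given shell command; stdout is swallowed unless buildout's
        # verbosity option is set, and the command's exit code is returned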
output = subprocess.PIPE
if self.buildout['buildout'].get('verbosity'):
output = None
command = subprocess.Popen(
cmd, shell=True, stdout=output, **kwargs)
return command.wait()
def create_file(self, file, template, options=None):
f = open(file, 'w')
if options is not None:
f.write(template % options)
else:
f.write(template)
f.close()
def make_scripts(self, location, extra_paths, ws):
scripts = []
if self.options['turboengine'] != '':
self.create_file(os.path.join(location, self.options['project'],'app.py'), templates.configs['turboengine']['app_py'], {'local': self.options['local-apps'],
'external': self.options['external-apps'],
'lib': self.options['libs-path'],
'settings': self.options['settings']})
return scripts
def create_project(self, project_dir, project):
old_config = self.buildout._read_installed_part_options()[0]
if self.name in old_config:
old_config = old_config[self.name]
if 'project' in old_config and\
old_config['project'] != self.options['project']:
self.log.warning("GAEBuild: creating new project '%s', to replace previous project '%s'"%(self.options['project'], old_config['project']))
# saving current work directory
old_cwd = os.getcwd()
os.chdir(project_dir)
        # creating the project directory and skeleton
os.makedirs(project)
os.chdir(project)
os.makedirs('docs')
os.makedirs('static/css')
os.makedirs('static/js')
os.makedirs('static/images')
os.makedirs('templates')
os.makedirs('i18n')
self.create_file("templates/base.html", templates.base_html)
self.create_file("templates/404.html", templates.t_404_html)
self.create_file("templates/500.html", templates.t_500_html)
self.create_file("app.yaml", templates.configs['common']['app_yaml'], {})
self.create_file("index.yaml", templates.configs['common']['index_yaml'], {})
self.create_file("cron.yaml", templates.configs['common']['cron_yaml'], {})
self.create_file("queue.yaml", templates.configs['common']['queue_yaml'], {})
self.create_file("dos.yaml", templates.configs['common']['dos_yaml'], {})
if self.options['turboengine'] == '':
self.create_file("app.py", templates.configs['gae']['app_py'], {'local': self.options['local-apps'],
'external': self.options['external-apps'],
'lib':self.options['libs-path']})
if self.options['turboengine'] != '':
os.makedirs('project')
self.create_file('project/__init__.py', '', {})
self.create_file("settings.py", '# Customize here settings', {})
self.create_file("project/development.py", templates.development_settings, {})
self.create_file("project/production.py", templates.production_settings, {})
if self.options['webservices'].lower() == 'true':
self.create_file("webservices.py", templates.configs['turboengine']['webservices_py'], {'local': self.options['local-apps'],
'external': self.options['external-apps'],
'lib':self.options['libs-path']})
        # restoring the original cwd
os.chdir(old_cwd)
def update_project_structure(self):
old_config = self.buildout._read_installed_part_options()[0]
# updating old config to project name
if self.name in old_config:
old_config = old_config[self.name]
if 'local-apps' in old_config and\
old_config['local-apps'] != self.options['local-apps']:
if os.path.exists(old_config['local-apps']):
self.log.info("GAEBuild: moving local-apps dir from % to %s"%(old_config['local-apps'], self.options['local-apps']))
shutil.move(old_config['local-apps'], self.options['local-apps'])
if 'external-apps' in old_config and\
old_config['external-apps'] != self.options['external-apps']:
if os.path.exists(old_config['external-apps']):
self.log.info("GAEBuild: moving external-apps dir from % to %s"%(old_config['external-apps'], self.options['external-apps']))
shutil.move(old_config['external-apps'], self.options['external-apps'])
if 'libs-path' in old_config and\
old_config['libs-path'] != self.options['libs-path']:
if os.path.exists(old_config['libs-path']):
self.log.info("GAEBuild: moving libs-path dir from % to %s"%(old_config['libs-path'], self.options['libs-path']))
shutil.move(old_config['libs-path'], self.options['libs-path'])
if 'script-dir' in old_config and\
old_config['script-dir'] != self.options['script-dir']:
if os.path.exists(old_config['script-dir']):
self.log.info("GAEBuild: moving script-dir dir from % to %s"%(old_config['script-dir'], self.options['script-dir']))
shutil.move(old_config['script-dir'], self.options['script-dir'])
if not os.path.exists(self.options['local-apps']):
self.log.info("GAEBuild: creating local-apps dir %s"%(self.options['local-apps']))
os.makedirs(self.options['local-apps'])
if not os.path.exists(self.options['external-apps']):
self.log.info("GAEBuild: creating external-apps dir %s"%(self.options['external-apps']))
os.makedirs(self.options['external-apps'])
if not os.path.exists(self.options['libs-path']):
self.log.info("GAEBuild: creating libs-path dir %s"%(self.options['libs-path']))
os.makedirs(self.options['libs-path'])
answer = raw_input("Do you want to install/update apps?(yes/no): ")
if answer.lower() == 'yes':
            print '\n************** Installing gae/django apps **************\n'
apps = self.options.get('apps', '').split()
if len(apps) == 0:
self.log.info('No apps to install')
else:
install_dir = os.path.abspath(self.options['external-apps'])
args = ['-U', '-b', self.buildout['buildout']['download-cache'], '-d', install_dir]
args.extend(apps)
links = self.options.get('find-links', '').split()
if len(links)>0:
links.insert(0, '-f')
args.extend(links)
install(args)
            print '\n************** End installing gae/django apps **************\n'
            print '\n************** Installing python projects **************\n'
apps = self.options.get('libs', '').split()
turboengine = self.options.get('turboengine', '')
if turboengine != '':
                if turboengine.lower() == 'last':
apps.append("turboengine")
else:
apps.append("turboengine==%s"%self.options.get('turboengine'))
if self.options.get('webservices').lower() == 'true':
apps.append("ZSI")
apps.append("zope.interface")
if len(apps) == 0:
self.log.info('No apps to install')
else:
from setuptools.command.easy_install import main
install_dir = os.path.abspath(self.options['libs-path'])
if self.options.get('zipped').lower() == 'true':
args = ['-U', '-z', '-d', install_dir]
else:
args = ['-U', '-d', install_dir]
args.extend(['-s', self.options['script-dir']])
links = self.options.get('find-links', '').split()
if len(links)>0:
links.insert(0, '-f')
args.extend(links)
args.extend(apps)
previous_path = os.environ.get('PYTHONPATH', '')
if previous_path == '':
os.environ['PYTHONPATH'] = '%s'%(install_dir)
else:
os.environ['PYTHONPATH'] = '%s:%s'%(install_dir, previous_path)
                main(args)  # installing libs
if previous_path == '':
del os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = previous_path
            print '\n************** End installing python projects **************\n'
def verify_or_create_download_dir(self, download_dir):
if not os.path.exists(download_dir):
os.mkdir(download_dir)
def install_recipe(self, location):
self.options['setup'] = location
development = zc.recipe.egg.Develop(self.buildout,
self.options['recipe'],
self.options)
#development.install()
del self.options['setup']
def install_project(self, project_dir, project):
if not os.path.exists(os.path.join(project_dir, project)):
self.create_project(project_dir, project)
else:
            self.log.info(
                'Skipping creation of project: %(project)s since '
                'it exists' % self.options)
# creating structure
old_cwd = os.getcwd()
os.chdir(os.path.join(project_dir, project))
self.update_project_structure()
os.chdir(old_cwd)
def install_scripts(self, location, extra_path, ws):
script_paths = []
# Make the wsgi and fastcgi scripts if enabled
script_paths.extend(self.make_scripts(location, extra_path, ws))
return script_paths
|
|
#!/usr/bin/python
import io # file streams library
import fcntl # I2C library
import time # sleep delay and timestamps library
import string # string parsing library
import smbus # I2C library
import Adafruit_DHT # DHT22 library
import numpy
# Google Cloud Pub Sub
from cloudconnector.com.plantos.gcp.authentication.authenticationhandler import AuthenticationHandler
from cloudconnector.com.plantos.gcp.pubsub.pubsubhandler import PubSubHandler
from cloudconnector.com.plantos.gcp.cloudstorage.storagehandler import CloudStorageHandler
#
# Atlas Scientific
#
class AtlasI2C:
long_timeout = 1.5 # the timeout needed to query readings and calibrations
short_timeout = 0.5 # timeout for regular commands
default_bus = 1 # the default bus for I2C on the newer Raspberry Pis, certain older boards use bus 0
default_address = 99 # the default address for the sensor
current_addr = default_address
def __init__(self, address=default_address, bus=default_bus):
# open two file streams, one for reading and one for writing
# the specific I2C channel is selected with bus
        # it is usually 1, except for older revisions where it's 0
# wb and rb indicate binary read and write
self.file_read = io.open("/dev/i2c-"+str(bus), "rb", buffering=0)
self.file_write = io.open("/dev/i2c-"+str(bus), "wb", buffering=0)
# initializes I2C to either a user specified or default address
self.set_i2c_address(address)
def set_i2c_address(self, addr):
# set the I2C communications to the slave specified by the address
# The commands for I2C dev using the ioctl functions are specified in
# the i2c-dev.h file from i2c-tools
I2C_SLAVE = 0x703
fcntl.ioctl(self.file_read, I2C_SLAVE, addr)
fcntl.ioctl(self.file_write, I2C_SLAVE, addr)
self.current_addr = addr
def write(self, cmd):
# appends the null character and sends the string over I2C
cmd += "\00"
self.file_write.write(cmd)
def read(self, num_of_bytes=31):
# reads a specified number of bytes from I2C, then parses and displays the result
res = self.file_read.read(num_of_bytes) # read from the board
response = filter(lambda x: x != '\x00', res) # remove the null characters to get the response
if ord(response[0]) == 1: # if the response isn't an error
# change MSB to 0 for all received characters except the first and get a list of characters
char_list = map(lambda x: chr(ord(x) & ~0x80), list(response[1:]))
            # NOTE: having to change the MSB to 0 is a glitch in the Raspberry Pi, and you shouldn't have to do this!
            if self.current_addr == 99: # pH sensor
                return "pH: " + ''.join(char_list) # convert the char list to a string and return it
            elif self.current_addr == 100: # EC sensor
                temp_string = ''.join(char_list)
                #print 'EC: ' + string.split(temp_string, ",")[0]
                #print 'TDS: ' + string.split(temp_string, ",")[1]
                #print 'Salinity: ' + string.split(temp_string, ",")[2]
                #return "Gravity: " + string.split(temp_string, ",")[3]
                # convert the char list to a string and return the four
                # comma-separated readings as labelled lines
                return ("EC: " + string.split(temp_string, ",")[0] +
                        "\nTDS: " + string.split(temp_string, ",")[1] +
                        "\nSalinity: " + string.split(temp_string, ",")[2] +
                        "\nGravity: " + string.split(temp_string, ",")[3])
            elif self.current_addr == 102: # RTD sensor
                return "Soluble Temperature: " + ''.join(char_list) + " C" # convert the char list to a string and return it
else:
return "Error " + str(ord(response[0]))
    def query(self, cmd):
        # write a command to the board, wait the correct timeout, and read the response
        self.write(cmd)
        # the read and calibration commands require a longer timeout
        if((cmd.upper().startswith("R")) or
           (cmd.upper().startswith("CAL"))):
            time.sleep(self.long_timeout)
        elif cmd.upper().startswith("SLEEP"):
            return "sleep mode"
        else:
            time.sleep(self.short_timeout)
        return self.read()
def close(self):
self.file_read.close()
self.file_write.close()
def list_i2c_devices(self):
prev_addr = self.current_addr # save the current address so we can restore it after
i2c_devices = []
for i in range (0,128):
try:
self.set_i2c_address(i)
self.read()
i2c_devices.append(i)
except IOError:
pass
self.set_i2c_address(prev_addr) # restore the address we were using
return i2c_devices
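# A minimal usage sketch for AtlasI2C (a hypothetical helper, assuming a pH
# circuit at the default address 99 on bus 1; not called anywhere in this
# script):
def example_read_ph():
    device = AtlasI2C()           # opens /dev/i2c-1 at address 99
    try:
        return device.query("R")  # blocks for long_timeout while measuring
    finally:
        device.close()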
#
# BH1750
#
class BH1750():
# define constants
    default_address = 0x23 # the default address for the sensor
    default_bus = 1 # the default bus for I2C on the newer Raspberry Pis, certain older boards use bus 0
POWER_DOWN = 0x00 # No active state
POWER_ON = 0x01 # Power on
RESET = 0x07 # Reset data register value
CONTINUOUS_LOW_RES_MODE = 0x13 # Start measurement at 4lx resolution. Time typically 16ms
CONTINUOUS_HIGH_RES_MODE_1 = 0x10 # Start measurement at 1lx resolution. Time typically 120ms
CONTINUOUS_HIGH_RES_MODE_2 = 0x11 # Start measurement at 0.5lx resolution. Time typically 120ms
ONE_TIME_HIGH_RES_MODE_1 = 0x20 # Start measurement at 1lx resolution. Time typically 120ms. Device is automatically set to Power Down after measurement.
ONE_TIME_HIGH_RES_MODE_2 = 0x21 # Start measurement at 0.5lx resolution. Time typically 120ms. Device is automatically set to Power Down after measurement.
    ONE_TIME_LOW_RES_MODE = 0x23 # Start measurement at 4lx resolution. Time typically 16ms. Device is automatically set to Power Down after measurement.
def __init__(self):
self.bus = smbus.SMBus(1)
def convertToNumber(self, data):
return ((data[1]+(256*data[0]))/1.2)
def readLight(self, addr=default_address):
data=self.bus.read_i2c_block_data(addr,self.ONE_TIME_HIGH_RES_MODE_1)
return self.convertToNumber(data)
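# Example usage (a minimal sketch; assumes the BH1750 is wired to I2C bus 1
# at its default 0x23 address):
def _demo_bh1750():
    sensor = BH1750()
    print 'Light Intensity: ' + str(sensor.readLight()) + ' lx'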
#
# Raspberry Pi
#
class RasPi():
def getserial(self):
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
try:
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
f.close()
except:
cpuserial = "ERROR000000000"
return cpuserial
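# Example usage (a minimal sketch; getserial() returns the 16-character hex
# serial parsed from /proc/cpuinfo, or the ERROR placeholder on failure):
def _demo_raspi_serial():
    print "Device ID: " + RasPi().getserial()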
#
# Main
#
def main():
# Google Cloud Pub Sub
auth_manager = AuthenticationHandler()
http = auth_manager.authenticate()
store_client = CloudStorageHandler(http)
#store_client.list_buckets() # display list of buckets in pubsub
pubsub_client = PubSubHandler()
#pubsub_client.list_topics() # display list of topics in pubsub
# Sensor Objects Definition
device = AtlasI2C() # Atlas Scientific sensors (ph,ec, rtd)
device1 = BH1750() # BH1750 light sensor
device2 = Adafruit_DHT.DHT22 # DHT22 temperature and humidity sensor
device3 = RasPi() # Raspberry Pi 3 Model B
pin = 24 # DHT22 data pin
    gcp_timer = 1 * 60 # 1 minute between GCP Pub/Sub publishes
poll_timer = 10 # 10 seconds per sensor reading
device_serial = device3.getserial()
print "Device ID: " + device_serial
# Sensor Hard Limit Setting
hardlimit_low_humid = 0.0
hardlimit_high_humid = 100.0
hardlimit_low_temp = 10.0
hardlimit_high_temp = 50.0
hardlimit_low_ph = 1.0
hardlimit_high_ph = 13.0
hardlimit_low_ec = 0.0
hardlimit_high_ec = 10000.0
hardlimit_low_tds = 0.0
hardlimit_high_tds = 1000.0
hardlimit_low_sal = 0.0
hardlimit_high_sal = 1.0
hardlimit_low_gra = 0.5
hardlimit_high_gra = 1.5
hardlimit_low_rtd = 10.0
hardlimit_high_rtd = 40.0
hardlimit_low_lux = 0
hardlimit_high_lux = 1000.0
    # initialize sensors (discard the first light-intensity reading, which is often erroneous)
device1.readLight()
humidity, temperature = Adafruit_DHT.read_retry(device2, pin)
stat_dht22_temp = [float(temperature)] * ((gcp_timer/poll_timer)-1) # initialize local statistic
stat_dht22_humid = [float(humidity)] * ((gcp_timer/poll_timer)-1) # initialize local statistic
device.set_i2c_address(99) # (99 pH)
temp = device.query("R")
stat_atlas_ph = [float(string.split(temp, ": ")[1])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
device.set_i2c_address(100) # (100 EC)
temp = device.query("R")
temp1 = string.split(temp, ": ")[1] #EC
stat_atlas_ec = [float(string.split(temp1, "\n")[0])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
temp1 = string.split(temp, ": ")[2] #TDS
stat_atlas_tds = [float(string.split(temp1, "\n")[0])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
temp1 = string.split(temp, ": ")[3] #Salinity
stat_atlas_sal = [float(string.split(temp1, "\n")[0])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
temp1 = string.split(temp, ": ")[4] #Gravity
stat_atlas_gra = [float(string.split(temp1, "\n")[0])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
device.set_i2c_address(102) # (102 RTD)
temp = device.query("R")
temp = string.split(temp, ": ")[1]
stat_atlas_rtd = [float(string.split(temp, " C")[0])] * ((gcp_timer/poll_timer)-1) # initialize local statistic
stat_bh1750_lux = [float(str(device1.readLight()))] * ((gcp_timer/poll_timer)-1) # initialize local statistic
# set up numpy arrays
stat_dht22_temp = numpy.array(stat_dht22_temp)
stat_dht22_humid = numpy.array(stat_dht22_humid)
stat_atlas_ph = numpy.array(stat_atlas_ph)
stat_atlas_ec = numpy.array(stat_atlas_ec)
stat_atlas_tds = numpy.array(stat_atlas_tds)
stat_atlas_sal = numpy.array(stat_atlas_sal)
stat_atlas_gra = numpy.array(stat_atlas_gra)
stat_atlas_rtd = numpy.array(stat_atlas_rtd)
stat_bh1750_lux = numpy.array(stat_bh1750_lux)
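    # NOTE: each stats array is a fixed-size sliding window: every poll appends
    # one reading and the truncate step below deletes the oldest, so the
    # means/medians always cover the last few readings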
# main loop
while True:
# Capture input command
input = raw_input("Enter command: ")
if input.upper().startswith("LIST_ADDR"):
devices = device.list_i2c_devices() # obtain I2C devices address that are connected to the shield
            for i in range(len(devices)):
                print devices[i]
# continuous polling command automatically polls the board
elif input.upper().startswith("POLL"):
# Setup sensor polling timer and GCP PubSub timer
poll_time = time.time()
gcp_time = time.time()
try:
while True:
# Sensor Polling Task
if (time.time() - poll_time) >= poll_timer:
# reset sensor poll timer
poll_time = time.time()
humidity = None
temperature = None
humidity, temperature = Adafruit_DHT.read_retry(device2, pin) # DHT22
if humidity is not None and temperature is not None and (humidity>=hardlimit_low_humid and humidity<=hardlimit_high_humid) and (temperature>=hardlimit_low_temp and temperature<=hardlimit_high_temp):
print('Ambient Temperature: {0:0.1f} C \nAmbient Humidity: {1:0.1f} %'.format(temperature, humidity))
stat_dht22_temp = numpy.append(stat_dht22_temp, float(temperature)) #local statistic
stat_dht22_humid = numpy.append(stat_dht22_humid, float(humidity)) #local statistic
else:
print('DHT22 sensor error!')
                                stat_dht22_temp = numpy.append(stat_dht22_temp, numpy.median(stat_dht22_temp)) # substitute the window median for the bad reading so the array keeps its size
                                stat_dht22_humid = numpy.append(stat_dht22_humid, numpy.median(stat_dht22_humid))
device.set_i2c_address(99) # (99 pH)
temp = device.query("R")
print(temp)
stat_temporary = None
stat_temporary = float(string.split(temp, ": ")[1])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_ph and stat_temporary<=hardlimit_high_ph):
stat_atlas_ph = numpy.append(stat_atlas_ph, stat_temporary) #local statistic
else:
print('Atlas PH sensor error!')
                                stat_atlas_ph = numpy.append(stat_atlas_ph, numpy.median(stat_atlas_ph)) # substitute the window median for the bad reading
device.set_i2c_address(100) # (100 EC)
temp = device.query("R")
print(temp)
temp1 = string.split(temp, ": ")[1] #get EC
stat_temporary = None
stat_temporary = float(string.split(temp1, "\n")[0])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_ec and stat_temporary<=hardlimit_high_ec):
stat_atlas_ec = numpy.append(stat_atlas_ec, stat_temporary) #local statistic
else:
print('Atlas EC sensor error!')
                                stat_atlas_ec = numpy.append(stat_atlas_ec, numpy.median(stat_atlas_ec)) # substitute the window median for the bad reading
temp1 = string.split(temp, ": ")[2] #get TDS
stat_temporary = None
stat_temporary = float(string.split(temp1, "\n")[0])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_tds and stat_temporary<=hardlimit_high_tds):
stat_atlas_tds = numpy.append(stat_atlas_tds, stat_temporary) #local statistic
else:
print('Atlas TDS sensor error!')
                                stat_atlas_tds = numpy.append(stat_atlas_tds, numpy.median(stat_atlas_tds)) # substitute the window median for the bad reading
temp1 = string.split(temp, ": ")[3] #get Salinity
stat_temporary = None
stat_temporary = float(string.split(temp1, "\n")[0])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_sal and stat_temporary<=hardlimit_high_sal):
stat_atlas_sal = numpy.append(stat_atlas_sal, stat_temporary) #local statistic
else:
print('Atlas SAL sensor error!')
                                stat_atlas_sal = numpy.append(stat_atlas_sal, numpy.median(stat_atlas_sal)) # substitute the window median for the bad reading
temp1 = string.split(temp, ": ")[4] #get Gravity
stat_temporary = None
stat_temporary = float(string.split(temp1, "\n")[0])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_gra and stat_temporary<=hardlimit_high_gra):
stat_atlas_gra = numpy.append(stat_atlas_gra, stat_temporary) #local statistic
else:
print('Atlas GRA sensor error!')
                                stat_atlas_gra = numpy.append(stat_atlas_gra, numpy.median(stat_atlas_gra)) # substitute the window median for the bad reading
device.set_i2c_address(102) # (102 RTD)
temp = device.query("R")
print(temp)
temp = string.split(temp, ": ")[1]
stat_temporary = None
stat_temporary = float(string.split(temp, " C")[0])
if stat_temporary is not None and (stat_temporary>=hardlimit_low_rtd and stat_temporary<=hardlimit_high_rtd):
stat_atlas_rtd = numpy.append(stat_atlas_rtd, stat_temporary) #local statistic
else:
print('Atlas RTD sensor error!')
                                stat_atlas_rtd = numpy.append(stat_atlas_rtd, numpy.median(stat_atlas_rtd)) # substitute the window median for the bad reading
                        temp = float(device1.readLight()) # BH1750
                        print 'Light Intensity: ' + str(temp) + ' lx'
                        if (temp >= hardlimit_low_lux and temp <= hardlimit_high_lux):
stat_bh1750_lux = numpy.append(stat_bh1750_lux, temp) #local statistic
else:
print('BH1750 sensor error!')
                            stat_bh1750_lux = numpy.append(stat_bh1750_lux, numpy.median(stat_bh1750_lux)) # substitute the window median for the bad reading
# local statistic routine
# compute mean
print 'Local Statistic at ' + (time.strftime('%d/%m/%Y %H:%M:%S'))
print '[Average] Ambient Temperature: ' + str(numpy.mean(stat_dht22_temp)) + ' C'
print '[Average] Ambient Humidity: ' + str(numpy.mean(stat_dht22_humid)) + ' %'
print '[Average] pH: ' + str(numpy.mean(stat_atlas_ph))
print '[Average] EC: ' + str(numpy.mean(stat_atlas_ec))
print '[Average] TDS: ' + str(numpy.mean(stat_atlas_tds))
print '[Average] Salinity: ' + str(numpy.mean(stat_atlas_sal))
print '[Average] Gravity: ' + str(numpy.mean(stat_atlas_gra))
print '[Average] RTD: ' + str(numpy.mean(stat_atlas_rtd))
print '[Average] Light Intensity: ' + str(numpy.mean(stat_bh1750_lux)) + ' lx'
# compute median
print '[Median] Ambient Temperature: ' + str(numpy.median(stat_dht22_temp)) + ' C'
print '[Median] Ambient Humidity: ' + str(numpy.median(stat_dht22_humid)) + ' %'
print '[Median] pH: ' + str(numpy.median(stat_atlas_ph))
print '[Median] EC: ' + str(numpy.median(stat_atlas_ec))
print '[Median] TDS: ' + str(numpy.median(stat_atlas_tds))
print '[Median] Salinity: ' + str(numpy.median(stat_atlas_sal))
print '[Median] Gravity: ' + str(numpy.median(stat_atlas_gra))
print '[Median] RTD: ' + str(numpy.median(stat_atlas_rtd))
print '[Median] Light Intensity: ' + str(numpy.median(stat_bh1750_lux)) + ' lx'
# truncate local statistic arrays
stat_dht22_temp = numpy.delete(stat_dht22_temp, 0)
stat_dht22_humid = numpy.delete(stat_dht22_humid, 0)
stat_atlas_ph = numpy.delete(stat_atlas_ph, 0)
stat_atlas_ec = numpy.delete(stat_atlas_ec, 0)
stat_atlas_tds = numpy.delete(stat_atlas_tds, 0)
stat_atlas_sal = numpy.delete(stat_atlas_sal, 0)
stat_atlas_gra = numpy.delete(stat_atlas_gra, 0)
stat_atlas_rtd = numpy.delete(stat_atlas_rtd, 0)
stat_bh1750_lux = numpy.delete(stat_bh1750_lux, 0)
                        # Google PubSub Task
                        if (time.time() - gcp_time) >= gcp_timer: # publish once per gcp_timer interval
                            # reset GCP PubSub timer
                            gcp_time = time.time()
                            # append GCP PubSub attribute
                            timestamp = time.strftime('%d/%m/%Y %H:%M:%S')
                            attr = {
                                "time": timestamp,
                                "temperature": numpy.median(stat_dht22_temp),
                                "humidity": numpy.median(stat_dht22_humid),
                                "ph": numpy.median(stat_atlas_ph),
                                "ec": numpy.median(stat_atlas_ec),
                                "tds": numpy.median(stat_atlas_tds),
                                "sal": numpy.median(stat_atlas_sal),
                                "gravity": numpy.median(stat_atlas_gra),
                                "rtd": numpy.median(stat_atlas_rtd),
                                "light_intensity": numpy.median(stat_bh1750_lux),
                                "device_id": device_serial}
                            # append GCP PubSub data
                            data = '{"time":"%s","temperature":"%.2f","humidity":"%.2f","ph":"%.2f","ec":"%.2f","tds":"%.2f","sal":"%.2f","gravity":"%.2f","rtd":"%.2f","light_intensity":"%.2f","device_id":"%s"}' % (
                                timestamp, numpy.median(stat_dht22_temp), numpy.median(stat_dht22_humid),
                                numpy.median(stat_atlas_ph), numpy.median(stat_atlas_ec),
                                numpy.median(stat_atlas_tds),
                                numpy.median(stat_atlas_sal), numpy.median(stat_atlas_gra),
                                numpy.median(stat_atlas_rtd), numpy.median(stat_bh1750_lux), str(device_serial))
                            # encode payload as UTF-8 bytes before publishing
                            data = data.encode('utf-8')
                            # pubsub_client.publish_data(topic_name="plantos_ingest_data", data=data, attr=attr)
                            pubsub_client.publish_data(topic_name="plantos_iot_ingest", data=data, attr=attr)
except KeyboardInterrupt: # catches the ctrl-c command, which breaks the loop above
print("Continuous polling stopped")
# if not a special keyword, pass commands straight to board
else:
if len(input) == 0:
print "Please input valid command."
else:
try:
print(device.query(input))
except IOError:
print("Query failed \n - Address may be invalid, use List_addr command to see available addresses")
if __name__ == '__main__':
main()
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.client import SQLClient
from ibis.expr.datatypes import Schema
import ibis
class MockConnection(SQLClient):
_tables = {
'alltypes': [
('a', 'int8'),
('b', 'int16'),
('c', 'int32'),
('d', 'int64'),
('e', 'float'),
('f', 'double'),
('g', 'string'),
('h', 'boolean'),
('i', 'timestamp')
],
'star1': [
('c', 'int32'),
('f', 'double'),
('foo_id', 'string'),
('bar_id', 'string'),
],
'star2': [
('foo_id', 'string'),
('value1', 'double'),
('value3', 'double')
],
'star3': [
('bar_id', 'string'),
('value2', 'double')
],
'test1': [
('c', 'int32'),
('f', 'double'),
('g', 'string')
],
'test2': [
('key', 'string'),
('value', 'double')
],
'tpch_region': [
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string')
],
'tpch_nation': [
('n_nationkey', 'int16'),
('n_name', 'string'),
('n_regionkey', 'int16'),
('n_comment', 'string')
],
'tpch_lineitem': [
('l_orderkey', 'int64'),
('l_partkey', 'int64'),
('l_suppkey', 'int64'),
('l_linenumber', 'int32'),
('l_quantity', 'decimal(12,2)'),
('l_extendedprice', 'decimal(12,2)'),
('l_discount', 'decimal(12,2)'),
('l_tax', 'decimal(12,2)'),
('l_returnflag', 'string'),
('l_linestatus', 'string'),
('l_shipdate', 'string'),
('l_commitdate', 'string'),
('l_receiptdate', 'string'),
('l_shipinstruct', 'string'),
('l_shipmode', 'string'),
('l_comment', 'string')
],
'tpch_customer': [
('c_custkey', 'int64'),
('c_name', 'string'),
('c_address', 'string'),
('c_nationkey', 'int16'),
('c_phone', 'string'),
('c_acctbal', 'decimal'),
('c_mktsegment', 'string'),
('c_comment', 'string')
],
'tpch_orders': [
('o_orderkey', 'int64'),
('o_custkey', 'int64'),
('o_orderstatus', 'string'),
('o_totalprice', 'decimal(12,2)'),
('o_orderdate', 'string'),
('o_orderpriority', 'string'),
('o_clerk', 'string'),
('o_shippriority', 'int32'),
('o_comment', 'string')
],
'functional_alltypes': [
('id', 'int32'),
('bool_col', 'boolean'),
('tinyint_col', 'int8'),
('smallint_col', 'int16'),
('int_col', 'int32'),
('bigint_col', 'int64'),
('float_col', 'float'),
('double_col', 'double'),
('date_string_col', 'string'),
('string_col', 'string'),
('timestamp_col', 'timestamp'),
('year', 'int32'),
('month', 'int32')
],
'airlines': [
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('dayofweek', 'int32'),
('dep_time', 'int32'),
('crs_dep_time', 'int32'),
('arr_time', 'int32'),
('crs_arr_time', 'int32'),
('carrier', 'string'),
('flight_num', 'int32'),
('tail_num', 'int32'),
('actual_elapsed_time', 'int32'),
('crs_elapsed_time', 'int32'),
('airtime', 'int32'),
('arrdelay', 'int32'),
('depdelay', 'int32'),
('origin', 'string'),
('dest', 'string'),
('distance', 'int32'),
('taxi_in', 'int32'),
('taxi_out', 'int32'),
('cancelled', 'int32'),
('cancellation_code', 'string'),
('diverted', 'int32'),
('carrier_delay', 'int32'),
('weather_delay', 'int32'),
('nas_delay', 'int32'),
('security_delay', 'int32'),
('late_aircraft_delay', 'int32')
],
'tpcds_customer': [
('c_customer_sk', 'int64'),
('c_customer_id', 'string'),
('c_current_cdemo_sk', 'int32'),
('c_current_hdemo_sk', 'int32'),
('c_current_addr_sk', 'int32'),
('c_first_shipto_date_sk', 'int32'),
('c_first_sales_date_sk', 'int32'),
('c_salutation', 'string'),
('c_first_name', 'string'),
('c_last_name', 'string'),
('c_preferred_cust_flag', 'string'),
('c_birth_day', 'int32'),
('c_birth_month', 'int32'),
('c_birth_year', 'int32'),
('c_birth_country', 'string'),
('c_login', 'string'),
('c_email_address', 'string'),
('c_last_review_date', 'string')],
'tpcds_customer_address': [
('ca_address_sk', 'bigint'),
('ca_address_id', 'string'),
('ca_street_number', 'string'),
('ca_street_name', 'string'),
('ca_street_type', 'string'),
('ca_suite_number', 'string'),
('ca_city', 'string'),
('ca_county', 'string'),
('ca_state', 'string'),
('ca_zip', 'string'),
('ca_country', 'string'),
('ca_gmt_offset', 'decimal(5,2)'),
('ca_location_type', 'string')],
'tpcds_customer_demographics': [
('cd_demo_sk', 'bigint'),
('cd_gender', 'string'),
('cd_marital_status', 'string'),
('cd_education_status', 'string'),
('cd_purchase_estimate', 'int'),
('cd_credit_rating', 'string'),
('cd_dep_count', 'int'),
('cd_dep_employed_count', 'int'),
('cd_dep_college_count', 'int')],
'tpcds_date_dim': [
('d_date_sk', 'bigint'),
('d_date_id', 'string'),
('d_date', 'string'),
('d_month_seq', 'int'),
('d_week_seq', 'int'),
('d_quarter_seq', 'int'),
('d_year', 'int'),
('d_dow', 'int'),
('d_moy', 'int'),
('d_dom', 'int'),
('d_qoy', 'int'),
('d_fy_year', 'int'),
('d_fy_quarter_seq', 'int'),
('d_fy_week_seq', 'int'),
('d_day_name', 'string'),
('d_quarter_name', 'string'),
('d_holiday', 'string'),
('d_weekend', 'string'),
('d_following_holiday', 'string'),
('d_first_dom', 'int'),
('d_last_dom', 'int'),
('d_same_day_ly', 'int'),
('d_same_day_lq', 'int'),
('d_current_day', 'string'),
('d_current_week', 'string'),
('d_current_month', 'string'),
('d_current_quarter', 'string'),
('d_current_year', 'string')],
'tpcds_household_demographics': [
('hd_demo_sk', 'bigint'),
('hd_income_band_sk', 'int'),
('hd_buy_potential', 'string'),
('hd_dep_count', 'int'),
('hd_vehicle_count', 'int')],
'tpcds_item': [
('i_item_sk', 'bigint'),
('i_item_id', 'string'),
('i_rec_start_date', 'string'),
('i_rec_end_date', 'string'),
('i_item_desc', 'string'),
('i_current_price', 'decimal(7,2)'),
('i_wholesale_cost', 'decimal(7,2)'),
('i_brand_id', 'int'),
('i_brand', 'string'),
('i_class_id', 'int'),
('i_class', 'string'),
('i_category_id', 'int'),
('i_category', 'string'),
('i_manufact_id', 'int'),
('i_manufact', 'string'),
('i_size', 'string'),
('i_formulation', 'string'),
('i_color', 'string'),
('i_units', 'string'),
('i_container', 'string'),
('i_manager_id', 'int'),
('i_product_name', 'string')],
'tpcds_promotion': [
('p_promo_sk', 'bigint'),
('p_promo_id', 'string'),
('p_start_date_sk', 'int'),
('p_end_date_sk', 'int'),
('p_item_sk', 'int'),
('p_cost', 'decimal(15,2)'),
('p_response_target', 'int'),
('p_promo_name', 'string'),
('p_channel_dmail', 'string'),
('p_channel_email', 'string'),
('p_channel_catalog', 'string'),
('p_channel_tv', 'string'),
('p_channel_radio', 'string'),
('p_channel_press', 'string'),
('p_channel_event', 'string'),
('p_channel_demo', 'string'),
('p_channel_details', 'string'),
('p_purpose', 'string'),
('p_discount_active', 'string')],
'tpcds_store': [
('s_store_sk', 'bigint'),
('s_store_id', 'string'),
('s_rec_start_date', 'string'),
('s_rec_end_date', 'string'),
('s_closed_date_sk', 'int'),
('s_store_name', 'string'),
('s_number_employees', 'int'),
('s_floor_space', 'int'),
('s_hours', 'string'),
('s_manager', 'string'),
('s_market_id', 'int'),
('s_geography_class', 'string'),
('s_market_desc', 'string'),
('s_market_manager', 'string'),
('s_division_id', 'int'),
('s_division_name', 'string'),
('s_company_id', 'int'),
('s_company_name', 'string'),
('s_street_number', 'string'),
('s_street_name', 'string'),
('s_street_type', 'string'),
('s_suite_number', 'string'),
('s_city', 'string'),
('s_county', 'string'),
('s_state', 'string'),
('s_zip', 'string'),
('s_country', 'string'),
('s_gmt_offset', 'decimal(5,2)'),
('s_tax_precentage', 'decimal(5,2)')],
'tpcds_store_sales': [
('ss_sold_time_sk', 'bigint'),
('ss_item_sk', 'bigint'),
('ss_customer_sk', 'bigint'),
('ss_cdemo_sk', 'bigint'),
('ss_hdemo_sk', 'bigint'),
('ss_addr_sk', 'bigint'),
('ss_store_sk', 'bigint'),
('ss_promo_sk', 'bigint'),
('ss_ticket_number', 'int'),
('ss_quantity', 'int'),
('ss_wholesale_cost', 'decimal(7,2)'),
('ss_list_price', 'decimal(7,2)'),
('ss_sales_price', 'decimal(7,2)'),
('ss_ext_discount_amt', 'decimal(7,2)'),
('ss_ext_sales_price', 'decimal(7,2)'),
('ss_ext_wholesale_cost', 'decimal(7,2)'),
('ss_ext_list_price', 'decimal(7,2)'),
('ss_ext_tax', 'decimal(7,2)'),
('ss_coupon_amt', 'decimal(7,2)'),
('ss_net_paid', 'decimal(7,2)'),
('ss_net_paid_inc_tax', 'decimal(7,2)'),
('ss_net_profit', 'decimal(7,2)'),
('ss_sold_date_sk', 'bigint')],
'tpcds_time_dim': [
('t_time_sk', 'bigint'),
('t_time_id', 'string'),
('t_time', 'int'),
('t_hour', 'int'),
('t_minute', 'int'),
('t_second', 'int'),
('t_am_pm', 'string'),
('t_shift', 'string'),
('t_sub_shift', 'string'),
('t_meal_time', 'string')]
}
def __init__(self):
self.executed_queries = []
def _get_table_schema(self, name):
name = name.replace('`', '')
return Schema.from_tuples(self._tables[name])
def _build_ast(self, expr):
from ibis.impala.compiler import build_ast
return build_ast(expr)
def execute(self, expr, limit=None, async=False):
if async:
raise NotImplementedError
ast = self._build_ast_ensure_limit(expr, limit)
for query in ast.queries:
self.executed_queries.append(query.compile())
return None
def compile(self, expr, limit=None):
ast = self._build_ast_ensure_limit(expr, limit)
queries = [q.compile() for q in ast.queries]
return queries[0] if len(queries) == 1 else queries
_all_types_schema = [
('a', 'int8'),
('b', 'int16'),
('c', 'int32'),
('d', 'int64'),
('e', 'float'),
('f', 'double'),
('g', 'string'),
('h', 'boolean')
]
class BasicTestCase(object):
def setUp(self):
self.schema = _all_types_schema
self.schema_dict = dict(self.schema)
self.table = ibis.table(self.schema, 'schema')
self.int_cols = ['a', 'b', 'c', 'd']
self.bool_cols = ['h']
self.float_cols = ['e', 'f']
self.con = MockConnection()
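# Example usage (a minimal sketch; assumes the SQLClient base class exposes
# a table() method that resolves schemas through _get_table_schema):
def _example_compile_with_mock():
    con = MockConnection()
    t = con.table('star1')
    expr = t[t.f > 0]
    print(con.compile(expr)) # inspect the generated SQL without a cluster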
|
|
# Program by Ankur Gupta
# www.github.com/agupta231
# Feb 2017
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import numpy
import svgwrite
from config import Config
class Cut:
def __init__(self, iteration, cut_type):
self.iteration = iteration
self.length = Config.initial_cube_size * Config.iteration_multiplier ** (iteration - 1)
self.type = cut_type
self.id = numpy.random.randint(0, 999999999)
self.__generate_tabs()
def generate_bounding_box(self, drawing, starting_pos, shape_id):
dwg = drawing.g(id=shape_id, style="font-size: 0.5")
dwg.add(drawing.rect(
insert=tuple(starting_pos),
size=(str(self.length), str(self.length)),
stroke_width=Config.stroke_thickness,
stroke=Config.cube_color,
fill="none"
))
dwg.add(drawing.text(
str(shape_id),
insert=tuple(starting_pos),
))
return dwg
def generate_cut(self, drawing, starting_pos):
self.drawing = drawing
if self.type == "a":
return self.__gen_cut_a(starting_pos)
elif self.type == "b":
return self.__gen_cut_b(starting_pos)
elif self.type == "c":
return self.__gen_cut_c(starting_pos)
elif self.type == "a90":
return self.__gen_cut_a90(starting_pos)
elif self.type == "b90":
return self.__gen_cut_b90(starting_pos)
elif self.type == "c90":
return self.__gen_cut_c90(starting_pos)
else:
return None
def __generate_tabs(self):
if math.floor(self.length) >= 3:
self.tab_count = math.floor(self.length)
if self.tab_count % 2 != 1:
self.tab_count -= 1
else:
self.tab_count = 3
self.tab_count = int(self.tab_count)
self.tab_width = self.length / self.tab_count
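        # e.g. length 5.0 -> tab_count 5, tab_width 1.0;
        # length 4.0 -> tab_count 3 (floor(4) made odd), tab_width 4.0/3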
def __gen_cut_a(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right corner
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
last_pos += numpy.array([0, self.tab_width - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_b(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
        last_pos = numpy.array(starting_pos) # ndarray copy: += below must be vector addition, not list.extend
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
        last_pos = numpy.array(starting_pos)
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([0, self.length])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
return shape
def __gen_cut_c(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = list(starting_pos) + numpy.array([self.length - self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
# Bottom left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Bottom right corner
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_a90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right cutout
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.length / 2, 0])))
last_pos += numpy.array([-self.length / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2])))
last_pos += numpy.array([0, self.length / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width / 2, 0])))
last_pos += numpy.array([-self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
return shape
def __gen_cut_b90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
        last_pos = numpy.array(starting_pos) # ndarray copy: += below must be vector addition, not list.extend
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
        last_pos = numpy.array(starting_pos)
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.length, self.length])
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
# Bottom Left cutout
last_pos = list(starting_pos) + numpy.array([0, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([self.length / 2 - Config.material_thickness, 0])))
last_pos += numpy.array([self.length / 2 - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2 - Config.material_thickness])))
last_pos += numpy.array([0, self.length / 2 - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width / 2, 0])))
last_pos += numpy.array([self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
return shape
def __gen_cut_c90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
# Top left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right cutout
last_pos = list(starting_pos) + numpy.array([(self.length - self.tab_width) / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width / 2, 0])))
last_pos += numpy.array([self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2 - Config.material_thickness])))
last_pos += numpy.array([0, self.length / 2 - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.length / 2, 0])))
last_pos += numpy.array([self.length / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
# Bottom left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Bottom right corner
last_pos = list(starting_pos) + numpy.array(
[self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_line(self, start_array, translation_array):
return self.drawing.line(tuple(start_array), tuple(start_array + translation_array),
stroke=Config.cube_color,
stroke_width=Config.stroke_thickness)
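# Example usage (a minimal sketch; relies on the Config attributes already
# used above: initial_cube_size, iteration_multiplier, material_thickness,
# stroke_thickness and cube_color):
def _demo_cut():
    drawing = svgwrite.Drawing('cut_a.svg')
    cut = Cut(iteration=1, cut_type="a")
    drawing.add(cut.generate_cut(drawing, numpy.array([10.0, 10.0])))
    drawing.save()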
|
|
# -*- coding: utf-8 -*-
import datetime
import collections
from .config import Config
class Stmt(object):
"""Base class for all statement classes.
"""
def __init__(self, placeholder=None, quote_all_col_refs=None, quote_all_values=None, **kwargs):
"""Constructor
Keyword Arguments:
placeholder (string, optional): Placeholder character to use when parameterization is enabled.
Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
quote_all_col_refs (bool, optional): Quote all column references.
Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
            quote_all_values (bool, optional): Quote all values that are not parameterized.
                Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
**kwargs: Base class arguments.
Note:
Default settings for ``placeholder``, ``quote_all_col_refs`` and ``quote_all_values``
are set through :py:class:`mysqlstmt.config.Config`
"""
super(Stmt, self).__init__(**kwargs)
if placeholder is False or Config.placeholder is False:
self.placeholder = False
else:
self.placeholder = Config.placeholder if placeholder is None else placeholder
if quote_all_values is False or Config.quote_all_values is False:
self.quote_all_values = False
else:
self.quote_all_values = Config.quote_all_values if quote_all_values is None else quote_all_values
if quote_all_col_refs is False or Config.quote_all_col_refs is False:
self.quote_all_col_refs = False
else:
self.quote_all_col_refs = Config.quote_all_col_refs if quote_all_col_refs is None else quote_all_col_refs
# Public properties
self.query_options = [] # can append with ``set_option``
def __call__(self, *args, **kwargs):
"""Returns SQL statement created by :py:meth:`sql`"""
return self.sql()
def __str__(self):
"""Returns SQL statement created by :py:meth:`sql`"""
sql_t = self.sql()
return sql_t[0] if self.placeholder else sql_t
def sql(self):
"""Derived classes must override and build appropriate SQL statement.
Returns:
Either a tuple ``(SQL statement, parameterized values)`` if ``placeholder`` is set,
otherwise SQL statement as string.
Raises:
ValueError: The statement cannot be created with the given attributes.
NotImplementedError: There is no base class implementation.
"""
raise NotImplementedError
def quote_col_ref(self, col_ref):
"""Quote column reference with backticks.
Arguments:
col_ref (string): Column reference. Can be prefixed with the table name.
Returns:
string: Column reference quoted with backticks (``).
Notes:
Column reference will not be quoted if it contains a backtick, space or parenthesis.
"""
if self.quote_all_col_refs:
if ' ' in col_ref:
return col_ref # COLUMN AS ALIAS
if '(' in col_ref:
return col_ref # FUNCTION(COLUMN)
if '`' in col_ref:
return col_ref # already quoted
col_ref_parts = col_ref.split('.')
if len(col_ref_parts) > 1:
table, col = col_ref_parts
return '{0}.`{1}`'.format(table, col)
else:
return '`{0}`'.format(col_ref)
return col_ref
def pickle(self, val):
"""Convert variable value into a value that can be included in a SQL statement.
Arguments:
val (mixed): Value to pickle.
Returns:
tuple: (string, bool) Pickled value as a string and True if value should be parameterized.
"""
if val is None:
return 'NULL', False
elif val is True:
return '1', False
elif val is False:
return '0', False
elif isinstance(val, basestring):
return val, True
elif isinstance(val, (int, long, float)):
return str(val), False
elif isinstance(val, datetime.datetime):
return val.strftime('%Y-%m-%d %H:%M:%S'), True
elif isinstance(val, datetime.date):
return val.strftime('%Y-%m-%d'), True
elif isinstance(val, datetime.time):
return val.strftime('%H:%M:%S'), True
return unicode(val), True
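    # e.g. pickle(None) -> ('NULL', False); pickle(5) -> ('5', False);
    # pickle('abc') -> ('abc', True) -- True means the value may be parameterized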
@staticmethod
def quote(val):
"""Quotes a string with single quotemarks and adds backslashes to escape embedded single quotes.
Arguments:
            val (string): Value to quote.
        Returns:
            string: Value wrapped in single quotes with embedded single quotes escaped.
        Note:
            This is a very simple implementation. Conventional wisdom says you should *never* need
            to use this functionality. Whenever possible you should use parameterization,
            or escape values before they reach SQL statement creation.
"""
return u"'{0}'".format(val.replace("'", "\\'"))
@staticmethod
def table_alias(table_factor):
"""Returns the table alias from a table factor.
Arguments:
table_factor (string): Table factor reference such as ``table`` or ``table AS alias``.
Returns:
string
"""
table_parts = table_factor.split('AS')
return table_factor if len(table_parts) == 1 else table_parts[1].strip()
def _parameterize_values(self, list_or_value, inline_values, param_values):
"""Parameterizes a value or list of values.
Evaluates or iterates through ``list_or_value`` and if the value can be parameterized
it is added to ``param_values``, otherwise it is added to ``inline_values``.
Arguments:
list_or_value (list or mixed): A value or list of values to replace with ``placeholder``.
inline_values (list or None): List to append non-parameterized values to;
set to None to force everything to be parameterized.
param_values (list or None): List to append parameterized values to;
set to None to force everything not to be inlined.
"""
if isinstance(list_or_value, collections.Iterable) and not isinstance(list_or_value, basestring):
for val in list_or_value:
self._parameterize_values(val, inline_values, param_values)
else:
using_placeholder = False if (param_values is None) else bool(self.placeholder)
quote = False if using_placeholder is True else self.quote_all_values
list_or_value, can_paramize_val = self.pickle(list_or_value)
if inline_values is None:
param_values.append(list_or_value)
elif can_paramize_val and using_placeholder:
inline_values.append(self.placeholder)
param_values.append(list_or_value)
elif can_paramize_val and quote:
inline_values.append(self.quote(list_or_value))
else:
inline_values.append(list_or_value)
def set_option(self, list_or_value):
"""Sets query options (the keywords at the beginning of the SQL statement).
Arguments:
list_or_value (list or mixed): An option or list of options.
Returns:
object: self
"""
if isinstance(list_or_value, collections.Iterable) and not isinstance(list_or_value, basestring):
for val in list_or_value:
self.set_option(val)
else:
self.query_options.append(list_or_value)
return self
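# Example subclass (a minimal sketch, not part of the library): illustrates the
# sql() contract -- return a (sql, params) tuple when a placeholder is
# configured, otherwise a plain string.
class _ExampleStmt(Stmt):
    def sql(self):
        sql = 'SELECT 1'
        return (sql, []) if self.placeholder else sql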
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# https://stackoverflow.com/questions/13437727/python-write-to-excel-spreadsheet
#
import struct
import os
import sys
import gzip
import re
reload(sys)
sys.setdefaultencoding('utf8')
if (len(sys.argv) < 2):
print "Usage: ./parse.py filename.tsr ... "
sys.exit()
#FILE_NAME = "video"
#FILE_NAME = "server"
#FILE_NAME = "client"
team_1_score = 0
team_2_score = 0
is_server_video = False
players_have_teams = False
# All players for all videos
all_players = {}
# Players for single video
players = {}
class Player(object):
name = ""
index = -1
team = -1
games_played = 0
wins = 0
losses = 0
total_goals = 0
total_assists = 0
def __init__(self, name):
self.name = name
#print name
def win_percentage(self):
if self.games_played == 0:
return 100.0
else:
return float(self.wins) / float(self.games_played) * 100.0
def goals_per_game(self):
if self.games_played == 0:
return 0
else:
return float(self.total_goals) / float(self.games_played)
def assists_per_game(self):
if self.games_played == 0:
return 0
else:
return float(self.total_assists) / float(self.games_played)
def dump_stats(self):
print self.name + ":"
print "games played: " + str(self.games_played)
print "wins: " + str(self.wins)
print "losses: " + str(self.losses)
print "win percentage: %.0f%%" % self.win_percentage()
print "goals: " + str(self.total_goals)
print "goals per game: %.1f" % self.goals_per_game()
print "assists: " + str(self.total_assists)
print "assists per game: %.1f" % self.assists_per_game()
def printAsHex(data):
print(':'.join(x.encode('hex') for x in data))
def readByte(data, index):
return struct.unpack('>b', data[index:index+1])[0]
def readShort(data, index):
return struct.unpack('>h', data[index:index+2])[0]
def readInteger(data, index):
return struct.unpack('>I', data[index:index+4])[0]
def readFloat(data, index):
return struct.unpack('>f', data[index:index+4])[0]
def playerByIndex(player_index):
for player in players.values():
if player.index == player_index:
return player
    return None
def fileSize(filename):
st = os.stat(filename)
return st.st_size
def createPlayer(data, player_index):
index = 0
hash = readInteger(data, index)
index += 4
#int primary_color: 3 bytes
#int secondary_color: 3 bytes
#int tertiary_color: 3 bytes
index += 9
#bool is_ghost: 1 byte
index += 1
#int color_style: 1 byte
index += 1
player_name = str(data[index:index+17])
player_name = player_name.strip()
player_name = player_name.strip("\0")
#print "player_name: " + player_name
if (player_name in all_players.keys()):
#print "player " + player_name + " already exists"
player = all_players[player_name]
else:
#print "creating player " + str(player_index) + " : " + player_name
player = Player(player_name)
all_players[player_name] = player
players[player_name] = player
player.index = player_index
#print player
def handleFileHeader(length, data):
video_file_type = readShort(data, 0)
#print hex(video_file_type)
global is_server_video
if video_file_type == 0x07b8:
is_server_video = True
else:
is_server_video = False
#print "is_server_video: " + str(is_server_video)
def handleHeader(length, header):
#print "header"
header_length = len(str(header))
#print "header len: " + str(header_length)
if header_length < 26:
return
lap_number = readShort(header, 0)
track_data_length = readInteger(header, 2)
track_hash = header[6:6+16]
track_name_length = readInteger(header, 22)
#print lap_number
#print track_data_length
#print track_hash
#print track_name_length
track_name = str(header[26:26+track_name_length])
#print "track_name: " + track_name
index = 26 + track_name_length
track_maker_length = readInteger(header, index)
index += 4
track_maker = str(header[index:index+track_maker_length])
#print "track_maker " + str(track_maker)
index += track_maker_length
# player_data_length: 4 bytes
player_data_length = readInteger(header, index)
index += 4
# version: 4 bytes
version = readInteger(header, index)
index += 4
# player_count: 4 bytes
player_count = readInteger(header, index)
index += 4
#print "player_count: " + str(player_count)
# unknown: 12 bytes
index = index + 12
# Last player is "PunaBall"
for i in range(player_count - 1):
#print "i"+str(i)
createPlayer(header[index:index+32], i)
index += 32
def handleScore(new_score_1, new_score_2, player, assistee):
global team_1_score
global team_2_score
if (team_1_score != new_score_1):
team_1_score = new_score_1
elif team_2_score != new_score_2:
team_2_score = new_score_2
if player:
player.total_goals += 1
if assistee:
assistee.total_assists += 1
def handleFinalScore(team_1_score, team_2_score):
#print "team_1_score = " + str(team_1_score)
#print "team_2_score = " + str(team_2_score)
if (team_1_score > team_2_score):
winning_team = 0
elif (team_2_score > team_1_score):
winning_team = 1
else: # DRAW
winning_team = 2
for player in players.values():
player.games_played += 1
if winning_team != 2:
if (player.team == winning_team):
player.wins += 1
else:
player.losses += 1
def handleChat(length, data):
msg = str(data[4:length - 5])
msg = str(msg.strip()).decode('Cp1252')
#msg = msg.decode('unicode_escape').encode('utf8')
#print (msg[0:10])
if msg.startswith("Final score:"):
splitted = msg.split(" ")
if len(splitted) == 5:
team_1_score = int(splitted[2].split("-")[0])
team_2_score = int(splitted[2].split("-")[1])
handleFinalScore(team_1_score, team_2_score)
# Goal
pattern = re.compile("^[0-9]+-[0-9]*")
if (pattern.match(msg)):
scores = re.findall(r'\d+', msg)
new_score_1 = int(scores[0])
new_score_2 = int(scores[1])
parts = msg.split(" ")
if len(parts) > 1:
player_name = str(parts[1])
player = None
if player_name in all_players.keys():
player = all_players[player_name]
assistee = None
if (len(parts) > 2):
assistee_name = (parts[2])[1:-1]
if assistee_name in all_players.keys():
assistee = all_players[assistee_name]
if player:
handleScore(new_score_1, new_score_2, player, assistee)
#print "goal for player: " + player.name
else:
pass
#print "chat message: " + msg
# Sets correct team using player's x coordinate
def setTeam(player, player_x, player_y):
limit = 300
if (player_x < limit):
player.team = 0
else:
player.team = 1
#print "set team for " + player.name + ":" + str(player.team)
def handleRaceState(length, data):
#print "race stat len: " + str(length)
index = 0
#identifier = struct.unpack('>I', data[index:index+4])[0]
#index = index + 4
#print "identifier:" + str(identifier)
statcount = struct.unpack('b', data[19:20])[0]
#print "statcount:" + str(statcount)
racetime = readInteger(data, 0)
index = index + 4
#print "racetime: " + str(racetime)
global is_server_video
global players_have_teams
if is_server_video:
race_stat_size = 32
start_byte = 20
else:
race_stat_size = 14
start_byte = 20
for i in range(statcount):
index = race_stat_size * (i) + start_byte
#print "index:" + str(index)
#if not is_server_video:
#index += 3 # angle first
if is_server_video:
player_x = readFloat(data, index) #struct.unpack('>f', data[index:index+4])[0]
index += 4
player_y = readFloat(data, index)
index += 4
else:
byte1 = readByte(data, index)
byte2 = readByte(data, index+1)
#printAsHex(data[index:index+8])
#player_x = readInteger(data, index)
index += 2
#print str(byte1) + " " + str(byte2)
player_x = byte1*16 + byte2/16 #player_x >> 8
#player_x = int(player_x&0xf)*16+int(player_x&0xf0)/16
#index = index + 2
player_y = readInteger(data, index)
index += 4
            player_y = int(player_y&0xf)*16 + int(player_y&0xf0)/16  # both nibbles presumably from player_y, mirroring the player_x formula above
#print(':'.join(x.encode('hex') for x in data[index:index+17]))
#index = index + 24 - 16
#print "index:" + str(index)
if not players_have_teams and racetime < 10000:
player = playerByIndex(i)
if player:
setTeam(player, player_x, player_y)
players_have_teams = True
#print player.name
#print "x: " + str(player_x) + " y: " + str(player_y)
#print "next"
#print ""
#print(':'.join(x.encode('hex') for x in data))
#print (data[0:length])
def handleMessage(length, type, data):
#print "length " + str(length)
#print "type " + str(type)
# File Header
if (type == 500):
handleFileHeader(length, data)
# Game Header
if (type == 5):
handleHeader(length, data)
# Chat
if (type == 11):
handleChat(length, data)
# Race State Update
if (type == 9):
handleRaceState(length, data)
pass
def is_gzip_file(file_name):
    f = open(file_name, 'rb')
    magic = f.read(2)
    f.close()
    return magic == '\037\213'
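# Main loop over the input files. Each record appears to be framed as a 4-byte
# big-endian length followed by a 4-byte type and the payload; a length word
# with a nonzero high byte (or a zero length) marks a 2-byte "short" message
# whose payload is skipped.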
files = sys.argv[1:]
# Read files in given directory
if len(files) == 1 and os.path.isdir(files[0]):
files = [os.path.join(files[0],fn) for fn in next(os.walk(files[0]))[2]]
for file_name in files:
#print file_name
# Skip directories
if (os.path.isdir(file_name)):
print "skipping path: " + file_name
continue
if not is_gzip_file(file_name):
print "skipping unknown file: " + file_name
continue
with gzip.open(file_name, "rb") as file:
players = {}
players_have_teams = False
#with open(file_name, "rb") as file:
rawfile = file.read()
fileIndex = 0
fileLength = len(rawfile)
#print fileLength
while fileIndex < fileLength:
#print "fileIndex " + str(fileIndex)
message_length = readInteger(rawfile, fileIndex)
message_type = readInteger(rawfile, fileIndex + 4)
short_message = (message_length & 0xff000000) != 0
#print "short_message: " + str(short_message)
if short_message or message_length == 0:
message_length = 2
#print "message_length: " + str(message_length)
if not short_message:
handleMessage(message_length, message_type, rawfile[fileIndex+8 : fileIndex+8+message_length])
if (message_length > 0 and not short_message):
fileIndex = fileIndex + message_length + 4
else:
fileIndex = fileIndex + 2
for player in all_players.values():
player.dump_stats()
print " "
|
|
#!/usr/bin/env python
"""
Custom collections classes.
"""
__all__ = [
"ValidatedList",
"ValidatedDict",
]
# =====================================================================
# GLOBAL IMPORTS
# =====================================================================
from collections import abc
# =====================================================================
# VALIDATED LIST
# =====================================================================
class ValidatedList(list):
"""A list that runs custom converter and validators when new elements are added.
Multiple converters and validators can be assigned to the list. These
are executed in the given order with converters run before validators.
Validators must take the new element as the first argument and raise
an exception if validation fails.
validator(new_element) -> None
Converters must also take the new element as the first argument, but
they have to return the converted value.
converter(new_element) -> converted_value
Examples
--------
We can define validator and converter functions that are run on each
element of the list.
>>> def is_positive_validator(value):
... if value <= 0:
... raise TypeError('value must be positive')
...
>>> vl = ValidatedList([1, -1], validator=is_positive_validator)
Traceback (most recent call last):
...
TypeError: value must be positive
Multiple converters that are run before the validators can be specified.
>>> vl = ValidatedList([-1, '2', 3.0], converter=[float, abs],
... validator=is_positive_validator)
>>> vl
[1.0, 2.0, 3.0]
"""
def __init__(self, seq=(), converter=None, validator=None):
"""
Initialize the list.
Parameters
----------
seq : Iterable
A sequence of elements.
converter : callable or List[callable]
Functions that will be used to convert each new element of
the list.
        validator : callable or List[callable]
            Functions that will be used to validate each new element of
            the list.
"""
# Make sure converter and validator are always iterables.
if not (converter is None or isinstance(converter, abc.Iterable)):
converter = [converter]
if not (validator is None or isinstance(validator, abc.Iterable)):
validator = [validator]
self._converters = converter
self._validators = validator
# Validate and convert the whole sequence.
seq = self._convert_and_validate(seq)
super().__init__(seq)
def extend(self, iterable):
iterable = self._convert_and_validate(iterable)
super().extend(iterable)
def append(self, p_object):
p_object = self._convert_and_validate([p_object])[0]
super().append(p_object)
def insert(self, index, p_object):
p_object = self._convert_and_validate([p_object])[0]
super().insert(index, p_object)
def __iadd__(self, other):
other = self._convert_and_validate(other)
return super().__iadd__(other)
def __setitem__(self, key, value):
if isinstance(key, slice):
value = self._convert_and_validate(value)
else:
value = self._convert_and_validate([value])[0]
super().__setitem__(key, value)
def copy(self):
# Make sure a shallow copy still returns a ValidatedList.
return self.__class__(self)
def __getitem__(self, item):
# Make sure a slice returns a ValidatedList.
if isinstance(item, slice):
return self.__class__(super().__getitem__(item))
return super().__getitem__(item)
# This is needed for pickling. See https://github.com/openforcefield/openff-toolkit/issues/411
# for more details.
# TODO: Is there a cleaner way (getstate/setstate perhaps?) to allow FFs to be
# pickled?
def __reduce__(self):
return (__class__, (list(self),), self.__dict__)
def _convert_and_validate(self, seq):
"""Run all converters and the validator on the given sequence."""
# Run all element converters.
if self._converters is not None:
for converter in self._converters:
seq = [converter(element) for element in seq]
# Run all element validators.
if self._validators is not None:
for validator in self._validators:
for element in seq:
validator(element)
return seq
class ValidatedDict(dict):
"""A dict that runs custom converter and validators when new
elements are added.
Multiple converters and validators can be assigned to the dict.
These are executed in the given order with converters run before
validators.
Validators must take the new element as the first argument and raise
an exception if validation fails.
validator(new_element) -> None
Converters must also take the new element as the first argument, but
they have to return the converted value.
converter(new_element) -> converted_value
Examples
--------
We can define validator and converter functions that are run on each
value of the dict.
>>> def is_positive_validator(value):
... if value <= 0:
... raise TypeError('value must be positive')
...
>>> vl = ValidatedDict({'a': 1, 'b': -1}, validator=is_positive_validator)
Traceback (most recent call last):
...
TypeError: value must be positive
Multiple converters that are run before the validators can be specified.
>>> vl = ValidatedDict({'c': -1, 'd': '2', 'e': 3.0}, converter=[float, abs],
... validator=is_positive_validator)
>>> vl
{'c': 1.0, 'd': 2.0, 'e': 3.0}
"""
def __init__(self, mapping, converter=None, validator=None):
"""
Initialize the dict.
Parameters
----------
mapping : Mapping
A mapping of elements, probably a dict.
converter : callable or List[callable]
Functions that will be used to convert each new element of
the dict.
        validator : callable or List[callable]
            Functions that will be used to validate each new element of
            the dict.
"""
# Make sure converter and validator are always iterables.
if not (converter is None or isinstance(converter, abc.Iterable)):
converter = [converter]
if not (validator is None or isinstance(validator, abc.Iterable)):
validator = [validator]
self._converters = converter
self._validators = validator
# Validate and convert the whole mapping
mapping = self._convert_and_validate(mapping)
super().__init__(mapping)
def update(self, other):
other = self._convert_and_validate(dict(other))
super().update(other)
def copy(self):
return self.__class__(self)
def __setitem__(self, key, value):
value = self._convert_and_validate({None: value})[None]
super().__setitem__(key, value)
# This is needed for pickling. See https://github.com/openforcefield/openff-toolkit/issues/411
# for more details.
# TODO: Is there a cleaner way (getstate/setstate perhaps?) to allow FFs to be
# pickled?
def __reduce__(self):
return (__class__, (dict(self),), self.__dict__)
def _convert_and_validate(self, mapping):
"""Run all converters and the validator on the given mapping."""
# Run all element converters.
if self._converters is not None:
for converter in self._converters:
mapping = {key: converter(value) for key, value in mapping.items()}
# Run all element validators.
if self._validators is not None:
for validator in self._validators:
for value in mapping.values():
validator(value)
return mapping
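
# A minimal sketch (not part of the original module) of how these classes can
# back a reusable typed container; the name PositiveFloatList is illustrative.
class PositiveFloatList(ValidatedList):
    """A list whose elements are coerced to float and must be positive."""

    def __init__(self, seq=()):
        def _check_positive(value):
            if value <= 0:
                raise TypeError('value must be positive')
        super().__init__(seq, converter=float, validator=_check_positive)

# PositiveFloatList(['1', 2]) == [1.0, 2.0]; appending -3 raises TypeError.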
if __name__ == "__main__":
    import doctest
    doctest.run_docstring_examples(ValidatedList, globals())
    doctest.run_docstring_examples(ValidatedDict, globals())
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import urllib
import xml.sax
import base64
import boto
from boto import config
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
from boto.ec2.image import Image, ImageAttribute
from boto.ec2.instance import Reservation, Instance, ConsoleOutput
from boto.ec2.keypair import KeyPair
from boto.ec2.address import Address
from boto.ec2.zone import Zone
from boto.ec2.securitygroup import SecurityGroup
from boto.exception import EC2ResponseError
class EC2Connection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'ec2_version', '2008-02-01')
SignatureVersion = '1'
ResponseError = EC2ResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='ec2.amazonaws.com', debug=0,
https_connection_factory=None):
if config.has_option('Boto', 'ec2_host'):
host = config.get('Boto', 'ec2_host')
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory)
# Image methods
def get_all_images(self, image_ids=None, owners=None, executable_by=None):
params = {}
if image_ids:
self.build_list_params(params, image_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
if executable_by:
self.build_list_params(params, executable_by, 'ExecutableBy')
return self.get_list('DescribeImages', params, [('item', Image)])
def register_image(self, image_location):
params = {'ImageLocation':image_location}
rs = self.get_object('RegisterImage', params, ResultSet)
return rs.imageId
def deregister_image(self, image_id):
return self.get_status('DeregisterImage', {'ImageId':image_id})
# ImageAttribute methods
def get_image_attribute(self, image_id, attribute='launchPermission'):
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_object('DescribeImageAttribute', params, ImageAttribute)
def modify_image_attribute(self, image_id, attribute='launchPermission',
operation='add', user_ids=None, groups=None):
params = {'ImageId' : image_id,
'Attribute' : attribute,
'OperationType' : operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
self.build_list_params(params, groups, 'UserGroup')
return self.get_status('ModifyImageAttribute', params)
def reset_image_attribute(self, image_id, attribute='launchPermission'):
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_status('ResetImageAttribute', params)
# Instance methods
def get_all_instances(self, instance_ids=None):
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('DescribeInstances', params, [('item', Reservation)])
def run_instances(self, image_id, min_count=1, max_count=1,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None):
params = {'ImageId':image_id,
'MinCount':min_count,
'MaxCount': max_count}
if key_name:
params['KeyName'] = key_name
if security_groups:
l = []
for group in security_groups:
if isinstance(group, SecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'SecurityGroup')
if user_data:
params['UserData'] = base64.b64encode(user_data)
if addressing_type:
params['AddressingType'] = addressing_type
if instance_type:
params['InstanceType'] = instance_type
if placement:
params['Placement.AvailabilityZone'] = placement
return self.get_object('RunInstances', params, Reservation)
def terminate_instances(self, instance_ids=None):
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('TerminateInstances', params, [('item', Instance)])
def get_console_output(self, instance_id):
params = {}
self.build_list_params(params, [instance_id], 'InstanceId')
return self.get_object('GetConsoleOutput', params, ConsoleOutput)
def reboot_instances(self, instance_ids=None):
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_status('RebootInstances', params)
def confirm_product_instance(self, product_code, instance_id):
params = {'ProductCode' : product_code,
'InstanceId' : instance_id}
rs = self.get_object('ConfirmProductInstance', params, ResultSet)
return (rs.status, rs.ownerId)
# Zone methods
def get_all_zones(self, zones=None):
params = {}
if zones:
self.build_list_params(params, zones, 'ZoneName')
return self.get_list('DescribeAvailabilityZones', params, [('item', Zone)])
# Address methods
def get_all_addresses(self, addresses=None):
params = {}
if addresses:
self.build_list_params(params, addresses, 'PublicIp')
return self.get_list('DescribeAddresses', params, [('item', Address)])
def allocate_address(self):
return self.get_object('AllocateAddress', None, Address)
def release_address(self, public_ip):
params = {'PublicIp' : public_ip}
return self.get_status('ReleaseAddress', params)
def associate_address(self, instance_id, public_ip):
params = {'InstanceId' : instance_id, 'PublicIp' : public_ip}
return self.get_status('AssociateAddress', params)
def disassociate_address(self, public_ip):
params = {'PublicIp' : public_ip}
return self.get_status('DisassociateAddress', params)
# Keypair methods
def get_all_key_pairs(self, keynames=None):
params = {}
if keynames:
self.build_list_params(params, keynames, 'KeyName')
return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)])
def create_key_pair(self, key_name):
params = {'KeyName':key_name}
return self.get_object('CreateKeyPair', params, KeyPair)
def delete_key_pair(self, key_name):
params = {'KeyName':key_name}
return self.get_status('DeleteKeyPair', params)
# SecurityGroup methods
def get_all_security_groups(self, groupnames=None):
params = {}
if groupnames:
self.build_list_params(params, groupnames, 'GroupName')
return self.get_list('DescribeSecurityGroups', params, [('item', SecurityGroup)])
def create_security_group(self, name, description):
params = {'GroupName':name, 'GroupDescription':description}
group = self.get_object('CreateSecurityGroup', params, SecurityGroup)
group.name = name
group.description = description
return group
def delete_security_group(self, name):
params = {'GroupName':name}
return self.get_status('DeleteSecurityGroup', params)
def authorize_security_group(self, group_name, src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None):
params = {'GroupName':group_name}
if src_security_group_name:
params['SourceSecurityGroupName'] = src_security_group_name
if src_security_group_owner_id:
params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
if ip_protocol:
params['IpProtocol'] = ip_protocol
if from_port:
params['FromPort'] = from_port
if to_port:
params['ToPort'] = to_port
if cidr_ip:
params['CidrIp'] = urllib.quote(cidr_ip)
return self.get_status('AuthorizeSecurityGroupIngress', params)
def revoke_security_group(self, group_name, src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None):
params = {'GroupName':group_name}
if src_security_group_name:
params['SourceSecurityGroupName'] = src_security_group_name
if src_security_group_owner_id:
params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
if ip_protocol:
params['IpProtocol'] = ip_protocol
if from_port:
params['FromPort'] = from_port
if to_port:
params['ToPort'] = to_port
if cidr_ip:
params['CidrIp'] = cidr_ip
return self.get_status('RevokeSecurityGroupIngress', params)
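
# A minimal usage sketch, assuming valid AWS credentials for the 2008-era API
# this module targets (the key values and AMI id below are placeholders):
#
#   conn = EC2Connection(aws_access_key_id='<access-key>',
#                        aws_secret_access_key='<secret-key>')
#   keypair = conn.create_key_pair('demo-key')
#   reservation = conn.run_instances('ami-12345678', key_name='demo-key')
#   conn.terminate_instances([i.id for i in reservation.instances])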
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stubs for File service."""
import base64
import datetime
import hashlib
import os
import random
import string
import StringIO
import tempfile
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import blobstore as api_blobstore
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.files import blobstore as files_blobstore
from google.appengine.api.files import file as files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import gs
from google.appengine.ext import blobstore
from google.appengine.ext.cloudstorage import cloudstorage_stub
from google.appengine.runtime import apiproxy_errors
MAX_REQUEST_SIZE = 32 << 20
GS_INFO_KIND = blobstore_stub._GS_INFO_KIND
_now_function = datetime.datetime.now
def _to_seconds(datetime_obj):
return int(time.mktime(datetime_obj.timetuple()))
def _random_string(length):
"""Generate a random string of given length."""
return ''.join(
random.choice(string.letters + string.digits) for _ in range(length))
def raise_error(error_code, error_detail=''):
"""Raise application error helper method."""
raise apiproxy_errors.ApplicationError(error_code, error_detail=error_detail)
_BLOBSTORE_DIRECTORY = files_blobstore._BLOBSTORE_DIRECTORY
_GS_PREFIX = gs._GS_PREFIX
_GS_UPLOAD_PREFIX = _GS_PREFIX + 'writable:'
class _GoogleStorageUpload(tuple):
"""Stores information about a writable Google Storage file."""
buf = property(lambda self: self[0])
content_type = property(lambda self: self[1])
gs_filename = property(lambda self: self[2])
class GoogleStorage(object):
"""Virtual google storage to be used by file api."""
def _Upload(self, buf, content_type, gs_filename):
return _GoogleStorageUpload([buf, content_type, gs_filename])
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage:
apphosting.api.blobstore.blobstore_stub.BlobStorage instance.
"""
self.blob_storage = blob_storage
self.gs_stub = cloudstorage_stub.CloudStorageStub(self.blob_storage)
self.uploads = {}
self.finalized = set()
self.sequence_keys = {}
def remove_gs_prefix(self, gs_filename):
return gs_filename[len('/gs'):]
def add_gs_prefix(self, gs_filename):
return '/gs' + gs_filename
def get_blobkey(self, gs_filename):
return blobstore.create_gs_key(gs_filename)
def has_upload(self, filename):
"""Checks if there is an upload at this filename."""
return filename in self.uploads
def finalize(self, filename):
"""Marks file as finalized."""
upload = self.uploads[filename]
self.finalized.add(filename)
upload.buf.seek(0)
content = upload.buf.read()
blobkey = self.gs_stub.post_start_creation(
self.remove_gs_prefix(upload.gs_filename),
{'content-type': upload.content_type})
assert blobkey == self.get_blobkey(upload.gs_filename)
self.gs_stub.put_continue_creation(
blobkey, content, (0, len(content) - 1), len(content))
del self.sequence_keys[filename]
def is_finalized(self, filename):
"""Checks if file is already finalized."""
assert filename in self.uploads
return filename in self.finalized
def start_upload(self, request):
"""Starts a new upload based on the specified CreateRequest."""
mime_type = None
gs_filename = request.filename()
ignored_parameters = [
gs._CACHE_CONTROL_PARAMETER,
gs._CANNED_ACL_PARAMETER,
gs._CONTENT_DISPOSITION_PARAMETER,
gs._CONTENT_ENCODING_PARAMETER,
]
for param in request.parameters_list():
name = param.name()
if name == gs._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif (name in ignored_parameters or
name.startswith(gs._USER_METADATA_PREFIX)):
pass
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if not mime_type:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
elif not gs_filename:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
writable_name = '%s%s' % (
_GS_UPLOAD_PREFIX, base64.urlsafe_b64encode(random_str))
self.uploads[writable_name] = self._Upload(
StringIO.StringIO(), mime_type, gs_filename)
self.sequence_keys[writable_name] = None
datastore.Delete(
datastore.Key.from_path(GS_INFO_KIND,
self.get_blobkey(gs_filename),
namespace=''))
return writable_name
def append(self, filename, data, sequence_key):
"""Appends data to the upload filename."""
assert not self.is_finalized(filename)
if sequence_key:
current_sequence_key = self.sequence_keys[filename]
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.sequence_keys[filename] = sequence_key
self.uploads[filename].buf.write(data)
def stat(self, gs_filename):
"""
Returns:
file info for a finalized file with given filename
"""
blob_key = self.get_blobkey(gs_filename)
try:
fileinfo = datastore.Get(
datastore.Key.from_path(GS_INFO_KIND, blob_key, namespace=''))
fileinfo['filename'] = self.add_gs_prefix(fileinfo['filename'])
return fileinfo
except datastore_errors.EntityNotFoundError:
      raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR,
                  gs_filename)
def get_reader(self, gs_filename):
try:
return self.blob_storage.OpenBlob(self.get_blobkey(gs_filename))
except IOError:
return None
def listdir(self, request, response):
"""listdir.
Args:
request: ListDir RPC request.
response: ListDir RPC response.
Returns:
      A list of fully qualified filenames under a certain path, sorted in
      character order.
"""
path = self.remove_gs_prefix(request.path())
prefix = request.prefix() if request.has_prefix() else ''
q = datastore.Query(GS_INFO_KIND, namespace='')
fully_qualified_name = '/'.join([path, prefix])
if request.has_marker():
q['filename >'] = '/'.join([path, request.marker()])
else:
q['filename >='] = fully_qualified_name
if request.has_max_keys():
max_keys = request.max_keys()
else:
max_keys = 2**31-1
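    # Prefix scan: the key-ordered query starts at the first filename >= the
    # requested prefix (or just past the marker), and the loop below stops at
    # the first filename that no longer carries the prefix.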
for gs_file_info in q.Get(max_keys):
filename = gs_file_info['filename']
if filename.startswith(fully_qualified_name):
response.add_filenames(self.add_gs_prefix(filename))
else:
break
class GoogleStorageFile(object):
"""File object for '/gs/' files."""
def __init__(self, open_request, file_storage):
self.filename = open_request.filename()
self.file_storage = file_storage
self.open_mode = open_request.open_mode()
content_type = open_request.content_type()
if self.is_appending:
if not self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif not self.file_storage.has_upload(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
elif self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
else:
if not self.filename.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
else:
self.buf = self.file_storage.get_reader(self.filename)
if not self.buf:
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
return self.open_mode == file_service_pb.OpenRequest.APPEND
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(file_info['filename'])
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Copies up to max_bytes starting at pos into response from filename."""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.buf.seek(request.pos())
data = self.buf.read(request.max_bytes())
response.set_data(data)
def append(self, request, response):
"""Appends data to filename."""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.file_storage.append(
self.filename, request.data(), request.sequence_key())
def finalize(self):
"""Finalize a file.
Copies temp file data to permanent location for reading.
"""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
elif self.file_storage.is_finalized(self.filename):
raise_error(
file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.file_storage.finalize(self.filename)
class BlobstoreStorage(object):
"""Virtual file storage to be used by file api.
Abstracts away all aspects of logical and physical file organization of the
API.
"""
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage: An instance of
apphosting.api.blobstore.blobstore_stub.BlobStorage to use for blob
integration.
"""
self.blob_keys = {}
self.blobstore_files = set()
self.finalized_files = set()
self.created_files = set()
self.data_files = {}
self.sequence_keys = {}
self.blob_storage = blob_storage
self.blob_content_types = {}
self.blob_file_names = {}
def finalize(self, filename):
"""Marks file as finalized."""
if self.is_finalized(filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.finalized_files.add(filename)
def is_finalized(self, filename):
"""Checks if file is already finalized."""
return filename in self.finalized_files
def get_blob_key(self, ticket):
"""Gets blob key for blob creation ticket."""
return self.blob_keys.get(ticket)
def register_blob_key(self, ticket, blob_key):
"""Register blob key for a ticket."""
self.blob_keys[ticket] = blob_key
def has_blobstore_file(self, filename):
"""Checks if blobstore file was already created."""
return filename in self.blobstore_files
def add_blobstore_file(self, request):
"""Registers a created blob store file."""
mime_type = None
blob_filename = ''
for param in request.parameters_list():
name = param.name()
if name == files_blobstore._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif name == files_blobstore._BLOBINFO_UPLOADED_FILENAME_PARAMETER:
blob_filename = param.value()
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if mime_type is None:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
filename = (_BLOBSTORE_DIRECTORY +
files._CREATION_HANDLE_PREFIX +
base64.urlsafe_b64encode(random_str))
self.blobstore_files.add(filename)
self.blob_content_types[filename] = mime_type
self.blob_file_names[filename] = blob_filename
return filename
def get_sequence_key(self, filename):
"""Get sequence key for a file."""
return self.sequence_keys.get(filename, '')
def set_sequence_key(self, filename, sequence_key):
"""Set sequence key for a file."""
self.sequence_keys[filename] = sequence_key
def stat(self, filename):
"""
Returns:
file info for a finalized file with given filename."""
blob_key = files_blobstore.get_blob_key(filename)
file_info = datastore.Get(
datastore.Key.from_path(api_blobstore.BLOB_INFO_KIND, str(blob_key),
namespace=''))
    if file_info is None:
      raise_error(
          file_service_pb.FileServiceErrors.EXISTENCE_ERROR_MEATADATA_NOT_FOUND,
          filename)
return file_info
def save_blob(self, filename, blob_key):
"""Save filename temp data to a blobstore under given key."""
f = self._get_data_file(filename)
f.seek(0)
self.blob_storage.StoreBlob(blob_key, f)
f.seek(0, os.SEEK_END)
size = f.tell()
f.close()
del self.data_files[filename]
return size
def _get_data_file(self, filename):
"""Get a temp data file for a file."""
if not filename in self.data_files:
f = tempfile.TemporaryFile()
self.data_files[filename] = f
return f
return self.data_files[filename]
def get_md5_from_blob(self, blobkey):
"""Get md5 hexdigest of the blobfile with blobkey."""
    f = self.blob_storage.OpenBlob(blobkey)
    try:
file_md5 = hashlib.md5()
file_md5.update(f.read())
return file_md5.hexdigest()
finally:
f.close()
def append(self, filename, data):
"""Append data to file."""
self._get_data_file(filename).write(data)
def get_content_type(self, filename):
return self.blob_content_types[filename]
def get_blob_file_name(self, filename):
return self.blob_file_names[filename]
class BlobstoreFile(object):
"""File object for generic /blobstore/ file."""
def __init__(self, open_request, file_storage):
"""Constructor.
Args:
open_request: An instance of open file request.
file_storage: An instance of BlobstoreStorage.
"""
self.filename = open_request.filename()
self.file_storage = file_storage
self.blob_reader = None
self.content_type = None
self.mime_content_type = None
open_mode = open_request.open_mode()
content_type = open_request.content_type()
if not self.filename.startswith(_BLOBSTORE_DIRECTORY):
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
self.ticket = self.filename[len(_BLOBSTORE_DIRECTORY):]
if open_mode == file_service_pb.OpenRequest.APPEND:
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.mime_content_type = self.file_storage.get_content_type(self.filename)
self.blob_file_name = self.file_storage.get_blob_file_name(self.filename)
else:
if self.ticket.startswith(files._CREATION_HANDLE_PREFIX):
blobkey = self.file_storage.get_blob_key(self.ticket)
if not blobkey:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobkey not found.')
else:
blobkey = self.ticket
blob_info = blobstore.BlobInfo.get(blobkey)
if not blob_info:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobinfo not found.')
self.blob_reader = blobstore.BlobReader(blob_info)
self.mime_content_type = blob_info.content_type
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
    return self.blob_reader is None
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(self.filename)
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Read data from file
Args:
request: An instance of file_service_pb.ReadRequest.
response: An instance of file_service_pb.ReadResponse.
"""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.blob_reader.seek(request.pos())
response.set_data(self.blob_reader.read(request.max_bytes()))
def append(self, request, response):
"""Append data to file.
Args:
request: An instance of file_service_pb.AppendRequest.
response: An instance of file_service_pb.AppendResponse.
"""
sequence_key = request.sequence_key()
if sequence_key:
current_sequence_key = self.file_storage.get_sequence_key(self.filename)
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.file_storage.set_sequence_key(self.filename, sequence_key)
self.file_storage.append(self.filename, request.data())
def finalize(self):
"""Finalize a file.
Copies temp file data to the blobstore.
"""
self.file_storage.finalize(self.filename)
blob_key = _random_string(64)
self.file_storage.register_blob_key(self.ticket, blob_key)
size = self.file_storage.save_blob(self.filename, blob_key)
blob_info = datastore.Entity(api_blobstore.BLOB_INFO_KIND,
name=str(blob_key), namespace='')
blob_info['content_type'] = self.mime_content_type
blob_info['creation'] = _now_function()
blob_info['filename'] = self.blob_file_name
blob_info['size'] = size
blob_info['creation_handle'] = self.ticket
blob_info['md5_hash'] = self.file_storage.get_md5_from_blob(blob_key)
datastore.Put(blob_info)
blob_file = datastore.Entity('__BlobFileIndex__',
name=self.ticket,
namespace='')
blob_file['blob_key'] = str(blob_key)
datastore.Put(blob_file)
class FileServiceStub(apiproxy_stub.APIProxyStub):
"""Python stub for file service."""
THREADSAFE = False
def __init__(self, blob_storage):
"""Constructor."""
super(FileServiceStub, self).__init__('file',
max_request_size=MAX_REQUEST_SIZE)
self.open_files = {}
self.file_storage = BlobstoreStorage(blob_storage)
self.gs_storage = GoogleStorage(blob_storage)
def _Dynamic_Create(self, request, response):
filesystem = request.filesystem()
if request.has_filename() and filesystem != gs._GS_FILESYSTEM:
raise_error(file_service_pb.FileServiceErrors.FILE_NAME_SPECIFIED)
if filesystem == files_blobstore._BLOBSTORE_FILESYSTEM:
response.set_filename(self.file_storage.add_blobstore_file(request))
elif filesystem == gs._GS_FILESYSTEM:
response.set_filename(self.gs_storage.start_upload(request))
else:
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
def _Dynamic_Open(self, request, response):
"""Handler for Open RPC call."""
filename = request.filename()
if request.exclusive_lock() and filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.EXCLUSIVE_LOCK_FAILED)
if filename.startswith(_BLOBSTORE_DIRECTORY):
self.open_files[filename] = BlobstoreFile(request, self.file_storage)
elif filename.startswith(_GS_PREFIX):
self.open_files[filename] = GoogleStorageFile(request, self.gs_storage)
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
def _Dynamic_Close(self, request, response):
"""Handler for Close RPC call."""
filename = request.filename()
finalize = request.finalize()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
if finalize:
self.open_files[filename].finalize()
del self.open_files[filename]
def _Dynamic_Stat(self, request, response):
"""Handler for Stat RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
file = self.open_files[filename]
if file.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
file.stat(request, response)
def _Dynamic_Read(self, request, response):
"""Handler for Read RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].read(request, response)
def _Dynamic_Append(self, request, response):
"""Handler for Append RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].append(request, response)
def _Dynamic_GetCapabilities(self, request, response):
"""Handler for GetCapabilities RPC call."""
response.add_filesystem('blobstore')
response.add_filesystem('gs')
response.set_shuffle_available(False)
def _Dynamic_GetDefaultGsBucketName(self, request, response):
"""Handler for GetDefaultGsBucketName RPC call."""
response.set_default_gs_bucket_name('app_default_bucket')
def _Dynamic_ListDir(self, request, response):
"""Handler for ListDir RPC call.
Only for dev app server. See b/6761691.
"""
path = request.path()
if not path.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
self.gs_storage.listdir(request, response)
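
# A sketch of wiring this stub into the dev server's API proxy (illustrative;
# assumes a blobstore BlobStorage instance named blob_storage is available):
#
#   from google.appengine.api import apiproxy_stub_map
#   apiproxy_stub_map.apiproxy.RegisterStub(
#       'file', FileServiceStub(blob_storage))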
|
|
import copy
import base64
import mock
from webtest.app import TestRequest
import elasticsearch
from daybed import __version__ as VERSION, API_VERSION
from daybed.permissions import invert_permissions_matrix
from daybed.backends.exceptions import (
RecordNotFound, ModelNotFound
)
from daybed.tests.support import BaseWebTest, force_unicode
from daybed.schemas import registry
MODEL_DEFINITION = {
'definition': {
"title": "simple",
"description": "One optional field",
"fields": [{"name": "age",
"hint": "An integer",
"label": "Age",
"type": "int",
"required": False}]
}
}
MODEL_PERMISSIONS = [
'create_record',
'delete_all_records',
'delete_model',
'delete_own_records',
'read_all_records',
'read_definition',
'read_own_records',
'read_permissions',
'update_all_records',
'update_definition',
'update_own_records',
'update_permissions',
]
MODEL_RECORD = {'age': 42}
MODEL_RECORD2 = {'age': 25}
class FieldsViewTest(BaseWebTest):
def __init__(self, *args, **kwargs):
super(FieldsViewTest, self).__init__(*args, **kwargs)
if not hasattr(self, 'assertCountEqual'):
self.assertCountEqual = self.assertItemsEqual
def test_fields_are_listed(self):
response = self.app.get('/fields')
fields = response.json
names = [f.get('name') for f in fields]
self.assertCountEqual(names, registry.names)
# String field has no parameters
stringfield = [f for f in fields if f.get('name') == 'string'][0]
self.assertIsNone(stringfield.get('parameters'))
self.assertEquals(stringfield['default_hint'],
'A set of characters')
# Enum field describes list items type
enumfield = [f for f in fields if f.get('name') == 'enum'][0]
_type = enumfield['parameters'][0].get('items', {}).get('type')
self.assertEqual('string', _type)
# Point field describes GPS with default True
pointfield = [f for f in fields if f.get('name') == 'point'][0]
self.assertCountEqual(pointfield['parameters'],
[dict(name="gps",
default=True,
type="boolean",
label="Gps")])
class HelloViewTest(BaseWebTest):
def test_returns_info_about_url_and_version(self):
response = self.app.get('/')
self.assertEqual(response.json['version'], VERSION)
self.assertEqual(response.json['url'], 'http://localhost')
self.assertEqual(response.json['daybed'], 'hello')
def test_hello_uses_the_defined_http_scheme_if_defined(self):
original_scheme = (self.app.app.registry.settings
.get('daybed.http_scheme'))
try:
self.app.app.registry.settings['daybed.http_scheme'] = 'https'
response = self.app.get('/', headers=self.headers)
self.assertTrue(
response.json['url'].startswith('https'),
'%s should start with https' % response.json['url'])
finally:
self.app.app.registry.settings['daybed.http_scheme'] =\
original_scheme
def test_authentication_headers_should_be_ignored(self):
headers = self.headers.copy()
headers['Authorization'] = 'Basic boom'
        self.app.get('/', headers=headers)
def test_redirect_to_version(self):
# We don't want the prefix to be automatically added for this test.
original_request_class = self.app.RequestClass
try:
self.app.RequestClass = TestRequest # Standard RequestClass.
# GET on the hello view.
response = self.app.get('/')
self.assertEqual(response.status_int, 307)
self.assertEqual(response.location,
'http://localhost/%s/' % API_VERSION)
# GET on the fields view.
response = self.app.get('/fields')
self.assertEqual(response.status_int, 307)
self.assertEqual(response.location,
'http://localhost/%s/fields' % API_VERSION)
finally:
self.app.RequestClass = original_request_class
class BasicAuthRegistrationTest(BaseWebTest):
model_id = 'simple'
@property
def valid_definition(self):
return {
"title": "simple",
"description": "One optional field",
"fields": [{"name": "age", "type": "int", "required": False}]
}
def test_unauthorized_if_no_credentials(self):
self.app.put_json('/models/%s' % self.model_id,
{'definition': self.valid_definition},
headers=self.headers)
resp = self.app.get('/models/%s' % self.model_id,
status=401)
self.assertIn('401', resp)
def test_unauthorized_on_invalid_credentials(self):
self.app.put_json('/models/%s' % self.model_id,
{'definition': self.valid_definition},
headers=self.headers)
auth = base64.b64encode(
u'foo:bar'.encode('ascii')).strip().decode('ascii')
resp = self.app.get('/models/%s' % self.model_id,
headers={
'Authorization': 'Basic {0}'.format(auth)
},
status=401)
self.assertIn('401', resp)
def test_forbidden_if_required_permission_missing(self):
self.app.put_json('/models/%s' % self.model_id,
{'definition': self.valid_definition},
headers=self.headers)
self.app.patch_json('/models/%s/permissions' % self.model_id,
{self.credentials['id']: ["-ALL"]},
headers=self.headers)
resp = self.app.get('/models/%s' % self.model_id,
headers=self.headers,
status=403)
self.assertIn('403', resp)
class SporeTest(BaseWebTest):
def test_spore_get(self):
resp = self.app.get('/spore',
headers=self.headers, status=200)
self.assertEqual(resp.json['name'], 'daybed')
class DefinitionViewsTest(BaseWebTest):
def setUp(self):
super(DefinitionViewsTest, self).setUp()
model = copy.deepcopy(MODEL_DEFINITION)
model['definition']['fields'].append({
"name": "name",
"type": "string"
})
model['permissions'] = {"system.Everyone": ["ALL"]}
model['records'] = [{'name': 'Snowden', 'age': 31}]
self.app.put_json('/models/test',
model,
headers=self.headers)
def test_definition_retrieval(self):
resp = self.app.get('/models/test/definition',
headers=self.headers)
self.assertEqual(len(resp.json['fields']), 2)
def test_definition_update_returns_new_definition(self):
resp = self.app.put_json('/models/test/definition',
MODEL_DEFINITION['definition'],
headers=self.headers)
self.assertEqual(len(resp.json['fields']), 1)
def test_definition_update_must_be_valid(self):
definition = MODEL_DEFINITION['definition'].copy()
definition.pop('fields')
self.app.put_json('/models/test/definition',
definition,
headers=self.headers, status=400)
def test_definition_update_preserves_records(self):
self.app.put_json('/models/test/definition',
MODEL_DEFINITION['definition'],
headers=self.headers)
resp = self.app.get('/models/test/records',
headers=self.headers)
self.assertEqual(len(resp.json['records']), 1)
def test_definition_update_preserves_permissions(self):
self.app.put_json('/models/test/definition',
MODEL_DEFINITION['definition'],
headers=self.headers)
resp = self.app.get('/models/test/permissions',
headers=self.headers)
self.assertIn('system.Everyone', resp.json)
def test_model_creation_via_definition(self):
self.app.put_json('/models/new/definition',
MODEL_DEFINITION['definition'],
headers=self.headers)
resp = self.app.get('/models/new/records',
headers=self.headers)
self.assertEqual(len(resp.json['records']), 0)
class ModelsViewsTest(BaseWebTest):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(ModelsViewsTest, self).__init__(*args, **kwargs)
def test_models(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
resp = self.app.get('/models', headers=self.headers)
self.assertDictEqual(resp.json, {
"models": [
{
"id": "test",
"title": "simple",
"description": "One optional field",
}
]
})
def test_model_deletion(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
resp = self.app.post_json('/models/test/records',
MODEL_RECORD,
headers=self.headers)
record_id = resp.json['id']
resp = self.app.delete('/models/test',
headers=self.headers)
self.assertIn('name', resp.body.decode('utf-8'))
self.assertRaises(RecordNotFound,
self.db.get_record, 'test', record_id)
self.assertRaises(ModelNotFound, self.db.get_model_definition,
'test')
def test_unknown_model_deletion_raises_404(self):
self.app.delete('/models/unknown', {},
headers=self.headers,
status=404)
def test_retrieve_whole_model(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
resp = self.app.get('/models/test', {},
headers=self.headers)
self.assertEqual(resp.json['records'], [])
self.assertDictEqual(resp.json['permissions'],
{self.credentials['id']: MODEL_PERMISSIONS})
def test_permissions_unknown_retrieval(self):
resp = self.app.get('/models/test/permissions',
headers=self.headers, status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"location": "path",
"name": "test",
"description": "model not found"}],
"status": "error"}))
def test_permissions_retrieval(self):
self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers)
resp = self.app.get('/models/test/permissions',
headers=self.headers)
permissions = force_unicode(
{self.credentials['id']: MODEL_PERMISSIONS}
)
self.assertDictEqual(resp.json, permissions)
def test_patch_permissions(self):
self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers)
self.db.store_credentials('foo', {'id': 'alexis', 'key': 'bar'})
self.db.store_credentials('foobar', {'id': 'remy', 'key': 'bar'})
resp = self.app.patch_json('/models/test/permissions',
{"alexis": ["read_permissions"],
"remy": ["update_permissions"]},
headers=self.headers)
permissions = force_unicode(
{self.credentials['id']: MODEL_PERMISSIONS}
)
permissions[u"alexis"] = [u"read_permissions"]
permissions[u"remy"] = [u"update_permissions"]
self.assertDictEqual(resp.json, permissions)
def test_put_permissions(self):
self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers)
self.db.store_credentials('foo', {'id': 'alexis', 'key': 'bar'})
self.db.store_credentials('foobar', {'id': 'remy', 'key': 'bar'})
resp = self.app.put_json('/models/test/permissions',
{"alexis": ["read_permissions"],
"remy": ["update_permissions"]},
headers=self.headers)
permissions = dict()
permissions["alexis"] = ["read_permissions"]
permissions["remy"] = ["update_permissions"]
self.assertDictEqual(resp.json, force_unicode(permissions))
def test_post_model_definition_with_records(self):
model = MODEL_DEFINITION.copy()
model['records'] = [MODEL_RECORD, MODEL_RECORD]
resp = self.app.post_json('/models', model,
headers=self.headers)
model_id = resp.json['id']
self.assertEquals(len(self.db.get_records(model_id)), 2)
def test_put_model_definition_with_records(self):
model = MODEL_DEFINITION.copy()
model['records'] = [MODEL_RECORD, MODEL_RECORD]
resp = self.app.post_json('/models', model,
headers=self.headers)
model_id = resp.json['id']
model['records'] = [MODEL_RECORD]
resp = self.app.put_json('/models/%s' % model_id, model,
headers=self.headers)
self.assertEquals(len(self.db.get_records(model_id)), 1)
def test_post_model_definition_with_permissions(self):
model = MODEL_DEFINITION.copy()
model['permissions'] = {"Everyone": ["ALL"]}
resp = self.app.post_json('/models', model,
headers=self.headers)
model_id = resp.json['id']
definition = self.db.get_model_definition(model_id)
self.assertEquals(definition, MODEL_DEFINITION['definition'])
permissions = self.db.get_model_permissions(model_id)
self.assertEquals(invert_permissions_matrix(permissions),
{self.credentials['id']: MODEL_PERMISSIONS,
u'system.Everyone': MODEL_PERMISSIONS})
def test_put_model_definition_with_permissions(self):
model = MODEL_DEFINITION.copy()
model['permissions'] = {'Everyone': ['ALL']}
self.app.put_json('/models/test', model,
headers=self.headers)
permissions = self.db.get_model_permissions("test")
self.assertEquals(invert_permissions_matrix(permissions),
{self.credentials['id']: MODEL_PERMISSIONS,
u'system.Everyone': MODEL_PERMISSIONS})
def test_put_model_definition_new(self):
model = MODEL_DEFINITION.copy()
resp = self.app.put_json('/models/toto', model)
self.assertIn("id", resp.json)
def test_put_model_definition_update(self):
model = MODEL_DEFINITION.copy()
resp = self.app.put_json('/models/toto', model,
headers=self.headers)
self.assertIn("id", resp.json)
self.assertEqual("toto", resp.json["id"])
resp = self.app.put_json('/models/toto', model, status=401)
def test_definition_creation_rejects_malformed_json(self):
malformed_definition = '{"test":"toto", "titi": "tutu'
resp = self.app.put('/models/test',
{'definition': malformed_definition},
headers=self.headers,
status=400)
self.assertIn('"status": "error"', resp.body.decode('utf-8'))
def test_delete_model(self):
# 1. Test that the model and its records have been dropped
self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers)
resp = self.app.put_json('/models/test/records/123456',
MODEL_RECORD, headers=self.headers)
record = MODEL_RECORD.copy()
record["id"] = resp.json["id"]
self.db.store_credentials('foo', {'id': 'alexis', 'key': 'bar'})
self.db.store_credentials('foobar', {'id': 'remy', 'key': 'bar'})
permissions = {self.credentials['id']: ["delete_all_records",
"delete_model"],
"alexis": ["read_permissions"],
"remy": ["update_permissions"]}
resp = self.app.put_json(
'/models/test/permissions',
permissions,
headers=self.headers)
resp = self.app.delete('/models/test', headers=self.headers)
# 2. Test that the returned data is right
self.assertEqual(resp.json, force_unicode({
'definition': MODEL_DEFINITION["definition"],
'records': [record],
'permissions': permissions}))
class RecordsViewsTest(BaseWebTest):
def test_deserialized_value_is_stored(self):
definition = copy.deepcopy(MODEL_DEFINITION)
definition['definition']['fields'] = [{
'name': 'updated',
'type': 'date',
'autonow': True}]
self.app.put_json('/models/test', definition,
headers=self.headers)
resp = self.app.post_json('/models/test/records', {},
headers=self.headers)
stored = self.db.get_record('test', resp.json['id'])
self.assertIsNotNone(stored.get('updated'))
def test_delete_model_records(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
self.app.post_json('/models/test/records', MODEL_RECORD,
headers=self.headers)
resp = self.app.delete('/models/test/records',
headers=self.headers)
self.assertIn("records", resp.json)
        self.assertEqual(len(resp.json["records"]), 1)
self.assertIn("id", resp.json["records"][0])
self.assertEqual(resp.json["records"][0]["age"], 42)
def test_delete_unknown_model_records(self):
resp = self.app.delete('/models/unknown/records', {},
headers=self.headers,
status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"location": "path",
"name": "unknown",
"description": "model not found"}],
"status": "error"}))
def test_unknown_model_raises_404(self):
resp = self.app.get('/models/unknown/records', {},
headers=self.headers,
status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"location": "path",
"name": "unknown",
"description": "model not found"}],
"status": "error"}))
def test_unknown_model_records_creation(self):
resp = self.app.post_json('/models/unknown/records', {},
status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"description": "Unknown model unknown",
"location": "path",
"name": "modelname"
}],
"status": 'error'}
))
def test_get_model_unknown(self):
resp = self.app.get('/models/test',
headers=self.headers, status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"location": "path",
"name": "test",
"description": "model not found"}],
"status": "error"}))
def test_get_model_records(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
self.app.patch_json('/models/test/permissions',
{"Everyone": ["create_record", "read_own_records",
"delete_own_records"]},
headers=self.headers)
self.app.post_json('/models/test/records', MODEL_RECORD)
self.app.post_json('/models/test/records', MODEL_RECORD2,
headers=self.headers)
resp = self.app.get('/models/test/records',
headers=self.headers)
self.assertEqual(len(resp.json["records"]), 2)
resp = self.app.get('/models/test/records')
self.assertEqual(len(resp.json["records"]), 1)
def test_get_model_records_auto_json(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
self.app.patch_json('/models/test/permissions',
{"Everyone": ["create_record", "read_own_records",
"delete_own_records"]},
headers=self.headers)
self.app.post_json('/models/test/records', MODEL_RECORD)
self.app.post_json('/models/test/records', MODEL_RECORD2,
headers=self.headers)
resp = self.app.get('/models/test/records')
self.assertEqual(len(resp.json["records"]), 1)
def test_unknown_record_returns_404(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
resp = self.app.get('/models/test/records/1234',
headers=self.headers, status=404)
self.assertDictEqual(
resp.json, force_unicode({
"errors": [{
"location": "path",
"name": "1234",
"description": "record not found"}],
"status": "error"}))
def test_record_deletion(self):
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
resp = self.app.post_json('/models/test/records', MODEL_RECORD,
headers=self.headers)
record_id = resp.json['id']
# Test 200
resp = self.app.delete('/models/test/records/%s' % record_id,
headers=self.headers)
self.assertIn('id', resp.body.decode('utf-8'))
self.assertRaises(RecordNotFound, self.db.get_record,
'test', record_id)
# Test 404
resp = self.app.delete('/models/test/records/%s' % record_id,
headers=self.headers, status=404)
self.assertDictEqual(resp.json, force_unicode({
"errors": [{
"location": "path",
"name": record_id,
"description": "record not found"}],
"status": "error"}))
def assertStartsWith(self, a, b):
if not a.startswith(b):
            self.fail("'%s' doesn't start with '%s'" % (a, b))
class CreateTokenViewTest(BaseWebTest):
def setUp(self):
super(CreateTokenViewTest, self).setUp()
userpass = u'foolish:bar'.encode('ascii')
self.auth = base64.b64encode(userpass).strip().decode('ascii')
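        # For example (a worked value, not part of the test fixtures):
        # base64.b64encode(b'foolish:bar') == b'Zm9vbGlzaDpiYXI=', so the
        # requests below send 'Authorization: Basic Zm9vbGlzaDpiYXI='.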
def test_post_token(self):
response = self.app.post('/tokens', status=201)
self.assertIn("token", response.json)
self.assertTrue(len(response.json["token"]) == 64)
self.assertIn("credentials", response.json)
self.assertIn("id", response.json["credentials"])
self.assertTrue(len(response.json["credentials"]["id"]) == 64)
self.assertIn("key", response.json["credentials"])
self.assertTrue(len(response.json["credentials"]["key"]) == 64)
self.assertEqual("sha256", response.json["credentials"]["algorithm"])
def test_post_token_with_basic_auth(self):
response = self.app.post('/tokens', status=201, headers={
'Authorization': 'Basic {0}'.format(self.auth)
})
credentials = response.json
response = self.app.post('/tokens', status=200, headers={
'Authorization': 'Basic {0}'.format(self.auth)
})
self.assertEqual(credentials, response.json)
def test_post_token_with_token_authorization(self):
response = self.app.post('/tokens', status=201, headers={
'Authorization': 'Token {0}'.format(self.auth)
})
credentials = response.json
response = self.app.post('/tokens', status=200, headers={
'Authorization': 'Token {0}'.format(self.auth)
})
self.assertEqual(credentials, response.json)
def test_post_token_is_not_the_same_for_basic_or_token(self):
response = self.app.post('/tokens', status=201, headers={
'Authorization': 'Token {0}'.format(self.auth)
})
credentials = response.json
response = self.app.post('/tokens', status=201, headers={
'Authorization': 'Basic {0}'.format(self.auth)
})
self.assertNotEqual(credentials, response.json)
class TokenViewTest(BaseWebTest):
def test_unauthorized_if_not_authenticated(self):
self.app.get('/token', status=401)
def test_unauthorized_if_invalid_credentials(self):
userpass = u'foolish:bar'.encode('ascii')
auth = base64.b64encode(userpass).strip().decode('ascii')
self.app.get('/token',
headers={
'Authorization': 'Basic {0}'.format(auth)
},
status=401)
def test_return_credentials_if_authenticated(self):
response = self.app.get('/token', headers=self.headers)
self.assertEqual(len(response.json['token']), 64)
self.assertDictEqual(response.json['credentials'],
self.credentials)
class SearchViewTest(BaseWebTest):
def setUp(self):
super(SearchViewTest, self).setUp()
self.app.put_json('/models/test', MODEL_DEFINITION,
headers=self.headers)
def test_search_returns_200_if_query_is_correct(self):
self.app.post('/models/test/search/', {'match_all': {}},
headers=self.headers,
status=200)
self.app.get('/models/test/search/', {'match_all': {}},
headers=self.headers,
status=200)
@mock.patch('elasticsearch.client.Elasticsearch.search')
def test_search_supports_query_string_parameters(self, search_mock):
search_mock.return_value = {}
query = {'match_all': {}}
self.app.post('/models/test/search/?size=100', query,
headers=self.headers,
status=200)
        search_mock.assert_called_with(index='test', doc_type='test',
                                       body=query, size=100)
@mock.patch('elasticsearch.client.Elasticsearch.search')
def test_search_ignores_unsupported_parameters(self, search_mock):
search_mock.return_value = {}
query = {'match_all': {}}
self.app.get('/models/test/search/?size=1&from_=1&routing=a,b', query,
headers=self.headers,
status=200)
        search_mock.assert_called_with(index='test', doc_type='test',
                                       body=query, size=1, from_=1)
def test_search_returns_404_if_model_unknown(self):
self.app.get('/models/unknown/search/', {},
headers=self.headers,
status=404)
@mock.patch('elasticsearch.client.Elasticsearch.search')
def test_search_returns_502_if_elasticsearch_fails(self, search_mock):
search_mock.side_effect = Exception('Not available')
self.app.get('/models/test/search/', {},
headers=self.headers,
status=502)
@mock.patch('elasticsearch.client.Elasticsearch.search')
def test_search_returns_original_code_on_bad_request(self, search_mock):
badrequest = elasticsearch.RequestError('400', 'error', {'foo': 'bar'})
search_mock.side_effect = badrequest
resp = self.app.get('/models/test/search/', {},
headers=self.headers,
status=400)
self.assertEqual(resp.json['msg']['foo'], 'bar')
def test_search_view_requires_permission(self):
self.app.patch_json('/models/test/permissions',
{self.credentials['id']: ["-read_all_records"]},
headers=self.headers)
self.app.get('/models/test/search/', {},
headers=self.headers,
status=403)
class CORSHeadersTest(BaseWebTest):
def setUp(self):
super(CORSHeadersTest, self).setUp()
self.headers['Origin'] = 'notmyidea.org'
def test_support_on_options_404(self):
headers = self.headers.copy()
headers['Access-Control-Request-Method'] = 'GET'
response = self.app.options('/models/unknown/definition',
headers=headers,
status=200)
self.assertIn('Access-Control-Allow-Origin', response.headers)
def test_support_on_get_unknown_model(self):
response = self.app.get('/models/unknown/definition',
headers=self.headers,
status=404)
self.assertIn('Access-Control-Allow-Origin', response.headers)
def test_support_on_valid_definition(self):
response = self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers,
status=200)
self.assertIn('Access-Control-Allow-Origin', response.headers)
def test_support_on_invalid_definition(self):
definition = copy.deepcopy(MODEL_DEFINITION)
definition['definition'].pop('fields')
response = self.app.put_json('/models/test',
definition,
headers=self.headers,
status=400)
self.assertIn('Access-Control-Allow-Origin', response.headers)
def test_support_on_unauthorized(self):
response = self.app.get('/token',
MODEL_DEFINITION,
headers={'Origin': 'notmyidea.org'},
status=401)
self.assertIn('Access-Control-Allow-Origin', response.headers)
def test_support_on_forbidden(self):
self.app.put_json('/models/test',
MODEL_DEFINITION,
headers=self.headers,
status=200)
self.app.patch_json('/models/test/permissions',
{self.credentials['id']: ["-ALL"]},
headers=self.headers,
status=200)
response = self.app.get('/models/test',
headers=self.headers,
status=403)
self.assertIn('Access-Control-Allow-Origin', response.headers)
|
|
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The ios_l2_interfaces class
In this file the current configuration (as a dict) is compared to the
provided configuration (as a dict) and the command set necessary to
bring the current configuration to its desired end state is created.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.ios.facts.facts import Facts
from ansible.module_utils.network.ios.utils.utils import dict_to_set
from ansible.module_utils.network.ios.utils.utils import remove_command_from_config_list, add_command_to_config_list
from ansible.module_utils.network.ios.utils.utils import filter_dict_having_none_value, remove_duplicate_interface
class L2_Interfaces(ConfigBase):
"""
The ios_l2_interfaces class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'l2_interfaces',
]
access_cmds = {'access_vlan': 'switchport access vlan'}
trunk_cmds = {'encapsulation': 'switchport trunk encapsulation', 'pruning_vlans': 'switchport trunk pruning vlan',
'native_vlan': 'switchport trunk native vlan', 'allowed_vlans': 'switchport trunk allowed vlan'}
def get_interfaces_facts(self):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
interfaces_facts = facts['ansible_network_resources'].get('l2_interfaces')
if not interfaces_facts:
return []
return interfaces_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
        :returns: The result from module execution
"""
result = {'changed': False}
commands = []
warnings = []
existing_facts = self.get_interfaces_facts()
commands.extend(self.set_config(existing_facts))
result['before'] = existing_facts
if commands:
if not self._module.check_mode:
self._connection.edit_config(commands)
result['changed'] = True
result['commands'] = commands
interfaces_facts = self.get_interfaces_facts()
if result['changed']:
result['after'] = interfaces_facts
result['warnings'] = warnings
return result
def set_config(self, existing_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
"""
want = self._module.params['config']
have = existing_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
"""
commands = []
state = self._module.params['state']
if state in ('overridden', 'merged', 'replaced') and not want:
self._module.fail_json(msg='value of config parameter must not be empty for state {0}'.format(state))
if state == 'overridden':
commands = self._state_overridden(want, have, self._module)
elif state == 'deleted':
commands = self._state_deleted(want, have)
elif state == 'merged':
commands = self._state_merged(want, have, self._module)
elif state == 'replaced':
commands = self._state_replaced(want, have, self._module)
return commands
def _state_replaced(self, want, have, module):
""" The command generator when state is replaced
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:param interface_type: interface type
:rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
"""
commands = []
for interface in want:
for each in have:
if each['name'] == interface['name']:
break
else:
continue
have_dict = filter_dict_having_none_value(interface, each)
commands.extend(self._clear_config(dict(), have_dict))
commands.extend(self._set_config(interface, each, module))
# Remove the duplicate interface call
commands = remove_duplicate_interface(commands)
return commands
def _state_overridden(self, want, have, module):
""" The command generator when state is overridden
:param want: the desired configuration as a dictionary
:param obj_in_have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for each in have:
for interface in want:
if each['name'] == interface['name']:
break
else:
# We didn't find a matching desired state, which means we can
                # pretend we received an empty desired state.
interface = dict(name=each['name'])
kwargs = {'want': interface, 'have': each}
commands.extend(self._clear_config(**kwargs))
continue
have_dict = filter_dict_having_none_value(interface, each)
commands.extend(self._clear_config(dict(), have_dict))
commands.extend(self._set_config(interface, each, module))
# Remove the duplicate interface call
commands = remove_duplicate_interface(commands)
return commands
def _state_merged(self, want, have, module):
""" The command generator when state is merged
:param want: the additive configuration as a dictionary
:param obj_in_have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
commands = []
for interface in want:
for each in have:
if each['name'] == interface['name']:
break
else:
continue
commands.extend(self._set_config(interface, each, module))
return commands
def _state_deleted(self, want, have):
""" The command generator when state is deleted
:param want: the objects from which the configuration should be removed
:param obj_in_have: the current configuration as a dictionary
:param interface_type: interface type
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
if want:
for interface in want:
for each in have:
if each['name'] == interface['name']:
break
else:
continue
interface = dict(name=interface['name'])
commands.extend(self._clear_config(interface, each))
else:
for each in have:
want = dict()
commands.extend(self._clear_config(want, each))
return commands
def _check_for_correct_vlan_range(self, vlan, module):
        # Check whether the provided VLAN range is valid
for each in vlan:
vlan_range = each.split('-')
if len(vlan_range) > 1:
                if int(vlan_range[0]) < int(vlan_range[1]):  # compare numerically, not lexically
return True
else:
module.fail_json(msg='Command rejected: Bad VLAN list - end of range not larger than the'
' start of range!')
else:
return True
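    # For example, _check_for_correct_vlan_range(['10-20', '30'], module)
    # returns True, while a reversed range such as ['20-10'] makes the
    # module fail with the bad-VLAN-list message above.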
def _set_config(self, want, have, module):
# Set the interface config based on the want and have config
commands = []
interface = 'interface ' + want['name']
# Get the diff b/w want and have
want_dict = dict_to_set(want)
have_dict = dict_to_set(have)
want_trunk = dict(want_dict).get('trunk')
have_trunk = dict(have_dict).get('trunk')
if want_trunk and have_trunk:
diff = set(tuple(dict(want_dict).get('trunk'))) - set(tuple(dict(have_dict).get('trunk')))
else:
diff = want_dict - have_dict
if diff:
diff = dict(diff)
if diff.get('access'):
cmd = 'switchport access vlan {0}'.format(diff.get('access')[0][1])
add_command_to_config_list(interface, cmd, commands)
if want_trunk:
if diff.get('trunk'):
diff = dict(diff.get('trunk'))
if diff.get('encapsulation'):
cmd = self.trunk_cmds['encapsulation'] + ' {0}'.format(diff.get('encapsulation'))
add_command_to_config_list(interface, cmd, commands)
if diff.get('native_vlan'):
cmd = self.trunk_cmds['native_vlan'] + ' {0}'.format(diff.get('native_vlan'))
add_command_to_config_list(interface, cmd, commands)
allowed_vlans = diff.get('allowed_vlans')
pruning_vlans = diff.get('pruning_vlans')
if allowed_vlans and self._check_for_correct_vlan_range(allowed_vlans, module):
allowed_vlans = ','.join(allowed_vlans)
cmd = self.trunk_cmds['allowed_vlans'] + ' {0}'.format(allowed_vlans)
add_command_to_config_list(interface, cmd, commands)
if pruning_vlans and self._check_for_correct_vlan_range(pruning_vlans, module):
pruning_vlans = ','.join(pruning_vlans)
cmd = self.trunk_cmds['pruning_vlans'] + ' {0}'.format(pruning_vlans)
add_command_to_config_list(interface, cmd, commands)
return commands
def _clear_config(self, want, have):
# Delete the interface config based on the want and have config
commands = []
if want.get('name'):
interface = 'interface ' + want['name']
else:
interface = 'interface ' + have['name']
if have.get('access') and want.get('access') is None:
remove_command_from_config_list(interface, L2_Interfaces.access_cmds['access_vlan'], commands)
elif have.get('access') and want.get('access'):
if have.get('access').get('vlan') != want.get('access').get('vlan'):
remove_command_from_config_list(interface, L2_Interfaces.access_cmds['access_vlan'], commands)
if have.get('trunk') and want.get('trunk') is None:
# Check when no config is passed
if have.get('trunk').get('encapsulation'):
remove_command_from_config_list(interface, self.trunk_cmds['encapsulation'], commands)
if have.get('trunk').get('native_vlan'):
remove_command_from_config_list(interface, self.trunk_cmds['native_vlan'], commands)
if have.get('trunk').get('allowed_vlans'):
remove_command_from_config_list(interface, self.trunk_cmds['allowed_vlans'], commands)
if have.get('trunk').get('pruning_vlans'):
remove_command_from_config_list(interface, self.trunk_cmds['pruning_vlans'], commands)
elif have.get('trunk') and want.get('trunk'):
# Check when config is passed, also used in replaced and override state
if have.get('trunk').get('encapsulation')\
and have.get('trunk').get('encapsulation') != want.get('trunk').get('encapsulation'):
remove_command_from_config_list(interface, self.trunk_cmds['encapsulation'], commands)
if have.get('trunk').get('native_vlan') \
and have.get('trunk').get('native_vlan') != want.get('trunk').get('native_vlan'):
remove_command_from_config_list(interface, self.trunk_cmds['native_vlan'], commands)
if have.get('trunk').get('allowed_vlans') \
and have.get('trunk').get('allowed_vlans') != want.get('trunk').get('allowed_vlans'):
remove_command_from_config_list(interface, self.trunk_cmds['allowed_vlans'], commands)
if have.get('trunk').get('pruning_vlans') \
and have.get('trunk').get('pruning_vlans') != want.get('trunk').get('pruning_vlans'):
remove_command_from_config_list(interface, self.trunk_cmds['pruning_vlans'], commands)
return commands
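# A minimal standalone sketch (an illustration only, not one of the Ansible
# helpers above) of the want/have diff idea that _set_config relies on:
# only attributes that differ from the device's current state become
# configuration commands.
def _sketch_access_vlan_diff(want, have):
    """Return the commands moving `have` to `want` (access VLAN only)."""
    commands = []
    want_vlan = (want.get('access') or {}).get('vlan')
    have_vlan = (have.get('access') or {}).get('vlan')
    if want_vlan and want_vlan != have_vlan:
        commands.append('interface ' + want['name'])
        commands.append('switchport access vlan {0}'.format(want_vlan))
    return commands
# Example:
#   _sketch_access_vlan_diff({'name': 'GigabitEthernet0/1', 'access': {'vlan': 20}},
#                            {'name': 'GigabitEthernet0/1', 'access': {'vlan': 10}})
#   -> ['interface GigabitEthernet0/1', 'switchport access vlan 20']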
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
import six
from webob import exc
from jacket.api.compute.openstack import api_version_request as api_version
from jacket.api.compute.openstack.compute import flavor_access \
as flavor_access_v21
from jacket.api.compute.openstack.compute.legacy_v2.contrib import flavor_access \
as flavor_access_v2
from jacket.api.compute.openstack.compute.legacy_v2 import flavors as flavors_api
from jacket import context
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
def generate_flavor(flavorid, ispublic):
return {
'id': flavorid,
'flavorid': str(flavorid),
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'memory_mb': 512,
'vcpus': 1,
'swap': 512,
'rxtx_factor': 1.0,
'disabled': False,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None,
'is_public': bool(ispublic)
}
INSTANCE_TYPES = {
'0': generate_flavor(0, True),
'1': generate_flavor(1, True),
'2': generate_flavor(2, False),
'3': generate_flavor(3, False)}
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'}]
def fake_get_flavor_access_by_flavor_id(context, flavorid):
res = []
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid:
res.append(access)
return res
def fake_get_flavor_by_flavor_id(context, flavorid, read_deleted=None):
return INSTANCE_TYPES[flavorid]
def _has_flavor_access(flavorid, projectid):
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid and \
access['project_id'] == projectid:
return True
return False
def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if filters is None or filters['is_public'] is None:
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
res = {}
for k, v in six.iteritems(INSTANCE_TYPES):
if filters['is_public'] and _has_flavor_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
res = sorted(res.values(), key=lambda item: item[sort_key])
return res
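# For example, with context.project_id == 'proj2' and filters={'is_public': True}
# the fakes above yield flavors '0' and '1' (public) plus '2' (proj2 is on its
# access list), which is what test_list_flavor_with_admin_default_proj2 expects.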
class FakeRequest(object):
environ = {"compute.context": context.get_admin_context()}
api_version_request = api_version.APIVersionRequest("2.1")
def get_db_flavor(self, flavor_id):
return INSTANCE_TYPES[flavor_id]
def is_legacy_v2(self):
return False
class FakeResponse(object):
obj = {'flavor': {'id': '0'},
'flavors': [
{'id': '0'},
{'id': '2'}]
}
def attach(self, **kwargs):
pass
class FlavorAccessTestV21(test.NoDBTestCase):
api_version = "2.1"
FlavorAccessController = flavor_access_v21.FlavorAccessController
FlavorActionController = flavor_access_v21.FlavorActionController
_prefix = "/v2/fake"
validation_ex = exception.ValidationError
def setUp(self):
super(FlavorAccessTestV21, self).setUp()
self.flavor_controller = flavors_api.Controller()
self.req = FakeRequest()
self.req.environ = {"compute.context": context.RequestContext('fake_user',
'fake')}
self.stub_out('compute.db.flavor_get_by_flavor_id',
fake_get_flavor_by_flavor_id)
self.stub_out('compute.db.flavor_get_all',
fake_get_all_flavors_sorted_list)
self.stub_out('compute.db.flavor_access_get_by_flavor_id',
fake_get_flavor_access_by_flavor_id)
self.flavor_access_controller = self.FlavorAccessController()
self.flavor_action_controller = self.FlavorActionController()
def _verify_flavor_list(self, result, expected):
# result already sorted by flavor_id
self.assertEqual(len(result), len(expected))
for d1, d2 in zip(result, expected):
self.assertEqual(d1['id'], d2['id'])
def test_list_flavor_access_public(self):
# query os-flavor-access on public flavor should return 404
self.assertRaises(exc.HTTPNotFound,
self.flavor_access_controller.index,
self.req, '1')
def test_list_flavor_access_private(self):
expected = {'flavor_access': [
{'flavor_id': '2', 'tenant_id': 'proj2'},
{'flavor_id': '2', 'tenant_id': 'proj3'}]}
result = self.flavor_access_controller.index(self.req, '2')
self.assertEqual(result, expected)
def test_list_flavor_with_admin_default_proj1(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=True)
req.environ['compute.context'].project_id = 'proj1'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_default_proj2(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=True)
req.environ['compute.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=true'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false_proj2(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
req.environ['compute.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
{'id': '3'}]}
url = self._prefix + '/flavors?is_public=none'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_default(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=true'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_false(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=none'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_show(self):
resp = FakeResponse()
self.flavor_action_controller.show(self.req, resp, '0')
self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
resp.obj['flavor'])
self.flavor_action_controller.show(self.req, resp, '2')
self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
resp.obj['flavor'])
def test_detail(self):
resp = FakeResponse()
self.flavor_action_controller.detail(self.req, resp)
self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
{'id': '2', 'os-flavor-access:is_public': False}],
resp.obj['flavors'])
def test_create(self):
resp = FakeResponse()
self.flavor_action_controller.create(self.req, {}, resp)
self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
resp.obj['flavor'])
def _get_add_access(self):
if self.api_version == "2.1":
return self.flavor_action_controller._add_tenant_access
else:
return self.flavor_action_controller._addTenantAccess
def _get_remove_access(self):
if self.api_version == "2.1":
return self.flavor_action_controller._remove_tenant_access
else:
return self.flavor_action_controller._removeTenantAccess
def test_add_tenant_access(self):
def stub_add_flavor_access(context, flavorid, projectid):
self.assertEqual('3', flavorid, "flavorid")
self.assertEqual("proj2", projectid, "projectid")
self.stub_out('compute.db.flavor_access_add',
stub_add_flavor_access)
expected = {'flavor_access':
[{'flavor_id': '3', 'tenant_id': 'proj3'}]}
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
add_access = self._get_add_access()
result = add_access(req, '3', body=body)
self.assertEqual(result, expected)
@mock.patch('compute.objects.Flavor.get_by_flavor_id',
side_effect=exception.FlavorNotFound(flavor_id='1'))
def test_add_tenant_access_with_flavor_not_found(self, mock_get):
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
add_access = self._get_add_access()
self.assertRaises(exc.HTTPNotFound,
add_access, req, '2', body=body)
def test_add_tenant_access_with_no_tenant(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'addTenantAccess': {'foo': 'proj2'}}
add_access = self._get_add_access()
self.assertRaises(self.validation_ex,
add_access, req, '2', body=body)
body = {'addTenantAccess': {'tenant': ''}}
self.assertRaises(self.validation_ex,
add_access, req, '2', body=body)
def test_add_tenant_access_with_already_added_access(self):
def stub_add_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessExists(flavor_id=flavorid,
project_id=projectid)
self.stub_out('compute.db.flavor_access_add',
stub_add_flavor_access)
body = {'addTenantAccess': {'tenant': 'proj2'}}
add_access = self._get_add_access()
self.assertRaises(exc.HTTPConflict,
add_access, self.req, '3', body=body)
def test_remove_tenant_access_with_bad_access(self):
def stub_remove_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessNotFound(flavor_id=flavorid,
project_id=projectid)
self.stub_out('compute.db.flavor_access_remove',
stub_remove_flavor_access)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
remove_access = self._get_remove_access()
self.assertRaises(exc.HTTPNotFound,
remove_access, self.req, '3', body=body)
def test_add_tenant_access_is_public(self):
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
req.api_version_request = api_version.APIVersionRequest('2.7')
add_access = self._get_add_access()
self.assertRaises(exc.HTTPConflict,
add_access, req, '1', body=body)
def test_delete_tenant_access_with_no_tenant(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
remove_access = self._get_remove_access()
body = {'removeTenantAccess': {'foo': 'proj2'}}
self.assertRaises(self.validation_ex,
remove_access, req, '2', body=body)
body = {'removeTenantAccess': {'tenant': ''}}
self.assertRaises(self.validation_ex,
remove_access, req, '2', body=body)
class FlavorAccessTestV20(FlavorAccessTestV21):
api_version = "2.0"
FlavorAccessController = flavor_access_v2.FlavorAccessController
FlavorActionController = flavor_access_v2.FlavorActionController
validation_ex = exc.HTTPBadRequest
def setUp(self):
super(FlavorAccessTestV20, self).setUp()
self.req = FakeRequest()
self.req.environ = {"compute.context": context.get_admin_context()}
def test_remove_tenant_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=False)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
remove_access = self._get_remove_access()
self.assertRaises(exception.AdminRequired,
remove_access, req, '2', body=body)
def test_add_tenant_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=False)
body = {'addTenantAccess': {'tenant': 'proj2'}}
add_access = self._get_add_access()
self.assertRaises(exception.AdminRequired,
add_access, req, '2', body=body)
def test_list_with_no_admin(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/fake/flavors')
self.assertRaises(exception.AdminRequired,
self.flavor_access_controller.index,
req, 'fake')
@mock.patch('compute.objects.Flavor.add_access')
def test_add_tenant_access_is_public(self, mock_add):
        # v2 doesn't check whether the flavor is public before adding access
expected = {'flavor_access':
[{'flavor_id': '3', 'tenant_id': 'proj3'}]}
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
add_access = self._get_add_access()
result = add_access(req, '3', body=body)
mock_add.assert_called_once_with('proj2')
self.assertEqual(result, expected)
@mock.patch('compute.objects.Flavor.add_access')
def test_add_tenant_access_with_flavor_not_found(self, mock_add):
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
add_access = self._get_add_access()
add_access(req, '1000', body=body)
mock_add.assert_called_once_with('proj2')
class FlavorAccessPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FlavorAccessPolicyEnforcementV21, self).setUp()
self.act_controller = flavor_access_v21.FlavorActionController()
self.access_controller = flavor_access_v21.FlavorAccessController()
self.req = fakes.HTTPRequest.blank('')
def test_add_tenant_access_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access:add_tenant_access"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.act_controller._add_tenant_access, self.req, fakes.FAKE_UUID,
body={'addTenantAccess': {'tenant': fakes.FAKE_UUID}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_remove_tenant_access_policy_failed(self):
rule_name = ("os_compute_api:os-flavor-access:"
"remove_tenant_access")
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.act_controller._remove_tenant_access, self.req,
fakes.FAKE_UUID,
body={'removeTenantAccess': {'tenant': fakes.FAKE_UUID}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_extend_create_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access"
self.policy.set_rules({rule_name: "project:non_fake"})
self.act_controller.create(self.req, None, None)
def test_extend_show_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access"
self.policy.set_rules({rule_name: "project:non_fake"})
self.act_controller.show(self.req, None, None)
def test_extend_detail_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access"
self.policy.set_rules({rule_name: "project:non_fake"})
self.act_controller.detail(self.req, None)
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.access_controller.index, self.req,
fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
import numpy as np
from .. import util
from ..constants import log
from ..constants import tol_path as tol
from ..constants import res_path as res
def arc_center(points, return_normal=True, return_angle=True):
"""
Given three points on a 2D or 3D arc find the center,
radius, normal, and angular span.
Parameters
---------
points : (3, dimension) float
Points in space, where dimension is either 2 or 3
return_normal : bool
If True calculate the 3D normal unit vector
return_angle : bool
If True calculate the start and stop angle and span
Returns
---------
result : dict
Contains arc center and other keys:
'center' : (d,) float, cartesian center of the arc
'radius' : float, radius of the arc
'normal' : (3,) float, the plane normal.
'angles' : (2,) float, angle of start and end in radians
'span' : float, angle swept by the arc in radians
"""
points = np.asanyarray(points, dtype=np.float64)
# get the vectors between the arc points
A, B, C = points
CB = C - B
CA = C - A
BA = B - A
# the lengths of those edges
a = np.linalg.norm(CB)
b = np.linalg.norm(CA)
c = np.linalg.norm(BA)
# perform radius calculation scaled to shortest edge
# to avoid precision issues with small or large arcs
scale = min([a, b, c])
# get the edge lengths scaled to the smallest
edges = np.array([a, b, c]) / scale
# half the total length of the edges
half = edges.sum() / 2.0
# check the denominator for the radius calculation
denom = half * np.product(half - edges)
if denom < tol.merge:
        raise ValueError('arc is collinear!')
# find the radius and scale back after the operation
radius = scale * ((np.product(edges) / 4.0) / np.sqrt(denom))
# run the center calculation
a2 = a**2
b2 = b**2
c2 = c**2
# barycentric approach
ba = [a2 * (b2 + c2 - a2),
b2 * (a2 + c2 - b2),
c2 * (a2 + b2 - c2)]
center = (points.T).dot(ba) / sum(ba)
if tol.strict:
# all points should be at the calculated radius from center
assert np.allclose(
np.linalg.norm(points - center, axis=1),
radius)
# start with initial results
result = {'center': center,
'radius': radius}
# exit early if we can
if not (return_normal or return_angle):
return result
if return_normal:
if points.shape == (3, 2):
# for 2D arcs still use the cross product so that
# the sign of the normal vector is consistent
result['normal'] = util.unitize(
np.cross(np.append(CA, 0), np.append(BA, 0)))
else:
# otherwise just take the cross product
result['normal'] = util.unitize(
np.cross(CA, BA))
if return_angle:
# vectors from points on arc to center point
vector = util.unitize(points - center)
edge_direction = np.diff(points, axis=0)
# find the angle between the first and last vector
dot = np.dot(*vector[[0, 2]])
if dot < (tol.zero - 1):
angle = np.pi
elif dot > 1 - tol.zero:
angle = 0.0
else:
angle = np.arccos(dot)
# if the angle is nonzero and vectors are opposite direction
# it means we have a long arc rather than the short path
if abs(angle) > tol.zero and np.dot(*edge_direction) < 0.0:
angle = (np.pi * 2) - angle
# convoluted angle logic
angles = np.arctan2(*vector[:, :2].T[::-1]) + np.pi * 2
angles_sorted = np.sort(angles[[0, 2]])
reverse = angles_sorted[0] < angles[1] < angles_sorted[1]
angles_sorted = angles_sorted[::(1 - int(not reverse) * 2)]
result['angles'] = angles_sorted
result['span'] = angle
return result
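# Example (hypothetical usage): three points on the unit circle recover the
# center and radius, up to floating point error:
#
#   info = arc_center([[1, 0], [0, 1], [-1, 0]])
#   info['center']   # ~ array([0.0, 0.0])
#   info['radius']   # ~ 1.0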
def discretize_arc(points,
close=False,
scale=1.0):
"""
Returns a version of a three point arc consisting of
line segments.
Parameters
---------
points : (3, d) float
Points on the arc where d in [2,3]
close : boolean
If True close the arc into a circle
scale : float
What is the approximate overall drawing scale
Used to establish order of magnitude for precision
Returns
---------
discrete : (m, d) float
Connected points in space
"""
# make sure points are (n, 3)
points, is_2D = util.stack_3D(points, return_2D=True)
# find the center of the points
try:
# try to find the center from the arc points
center_info = arc_center(points)
except BaseException:
# if we hit an exception return a very bad but
# technically correct discretization of the arc
if is_2D:
return points[:, :2]
return points
center, R, N, angle = (center_info['center'],
center_info['radius'],
center_info['normal'],
center_info['span'])
# if requested, close arc into a circle
if close:
angle = np.pi * 2
# the number of facets, based on the angle criteria
count_a = angle / res.seg_angle
count_l = ((R * angle)) / (res.seg_frac * scale)
# figure out the number of line segments
count = np.max([count_a, count_l])
# force at LEAST 4 points for the arc
# otherwise the endpoints will diverge
count = np.clip(count, 4, np.inf)
count = int(np.ceil(count))
V1 = util.unitize(points[0] - center)
V2 = util.unitize(np.cross(-N, V1))
t = np.linspace(0, angle, count)
discrete = np.tile(center, (count, 1))
discrete += R * np.cos(t).reshape((-1, 1)) * V1
discrete += R * np.sin(t).reshape((-1, 1)) * V2
# do an in-process check to make sure result endpoints
# match the endpoints of the source arc
if not close:
arc_dist = util.row_norm(points[[0, -1]] - discrete[[0, -1]])
arc_ok = (arc_dist < tol.merge).all()
if not arc_ok:
log.warning(
'failed to discretize arc (endpoint_distance=%s R=%s)',
str(arc_dist), R)
log.warning('Failed arc points: %s', str(points))
raise ValueError('Arc endpoints diverging!')
discrete = discrete[:, :(3 - is_2D)]
return discrete
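# Example (hypothetical usage): closing the same three points into a full
# circle yields a polygon whose vertex count follows the angular and
# segment-length criteria above, but is never fewer than 4 points:
#
#   circle = discretize_arc([[1, 0], [0, 1], [-1, 0]], close=True)
#   assert len(circle) >= 4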
def to_threepoint(center, radius, angles=None):
"""
For 2D arcs, given a center and radius convert them to three
points on the arc.
Parameters
-----------
center : (2,) float
Center point on the plane
radius : float
Radius of arc
angles : (2,) float
Angles in radians for start and end angle
if not specified, will default to (0.0, pi)
Returns
----------
three : (3, 2) float
Arc control points
"""
# if no angles provided assume we want a half circle
if angles is None:
angles = [0.0, np.pi]
# force angles to float64
angles = np.asanyarray(angles, dtype=np.float64)
if angles.shape != (2,):
raise ValueError('angles must be (2,)!')
# provide the wrap around
if angles[1] < angles[0]:
angles[1] += np.pi * 2
center = np.asanyarray(center, dtype=np.float64)
if center.shape != (2,):
raise ValueError('only valid on 2D arcs!')
# turn the angles of [start, end]
# into [start, middle, end]
angles = np.array([angles[0],
angles.mean(),
angles[1]],
dtype=np.float64)
# turn angles into (3, 2) points
three = (np.column_stack(
(np.cos(angles),
np.sin(angles))) * radius) + center
return three
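# Example (hypothetical usage): with the default angles (0.0, pi) a unit
# radius at the origin yields control points at angles 0, pi/2 and pi,
# i.e. approximately [[1, 0], [0, 1], [-1, 0]] up to floating point error:
#
#   three = to_threepoint(center=[0, 0], radius=1)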
|
|
from pylab import find
import numpy as np
from numpy import linalg, zeros, array, vstack, ones, sum, power, uint32, insert, matrix
from warnings import catch_warnings, simplefilter
#skimage.transform
# http://stackoverflow.com/questions/11462781/fast-2d-rigid-body-transformations-in-numpy-scipy
# skimage.transform.fast_homography(im, H)
# Generate 6 degrees of freedom homography transformation
def compute_homog(xyz_norm1, xyz_norm2):
'Computes homography from normalized (0 to 1) point correspondences'
num_pts = xyz_norm1.shape[1]
assert xyz_norm1.shape == xyz_norm2.shape, ''
assert xyz_norm1.shape[0] == 3, ''
Mbynine = zeros((2*num_pts,9), dtype=np.float32)
for ilx in xrange(num_pts): # Loop over inliers
        # Concatenate all 2x9 matrices into an Mx9 matrix
u2 = xyz_norm2[0,ilx]
v2 = xyz_norm2[1,ilx]
(d,e,f) = -xyz_norm1[:,ilx]
(g,h,i) = v2*xyz_norm1[:,ilx]
(j,k,l) = xyz_norm1[:,ilx]
(p,q,r) = -u2*xyz_norm1[:,ilx]
Mbynine[ilx*2:(ilx+1)*2,:] = array(\
[(0, 0, 0, d, e, f, g, h, i),
(j, k, l, 0, 0, 0, p, q, r) ] )
# Solve for the nullspace of the Mbynine
try:
(_U, _s, V) = linalg.svd(Mbynine)
except MemoryError:
# TODO: is sparse calculation faster than not?
print('Singular Value Decomposition Ran Out of Memory. Trying with a sparse matrix')
import scipy.sparse
import scipy.sparse.linalg
MbynineSparse = scipy.sparse.lil_matrix(Mbynine)
(_U, _s, V) = scipy.sparse.linalg.svds(MbynineSparse)
#import gc
#gc.collect()
#print('Singular Value Decomposition Ran Out of Memory.'+\
#'Trying to free some memory with garbage collection')
#(_U, _s, V) = linalg.svd(Mbynine)
#import pdb
#pdb.set_trace()
    # Rearrange the nullspace into a homography
h = V[-1,:] # (transposed in matlab)
H = vstack( ( h[0:3], h[3:6], h[6:9] ) )
return H
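# Example (hypothetical usage): with four identical normalized correspondences
# the 8x9 system has a one-dimensional nullspace, so the recovered homography
# is the identity up to scale (and sign):
#
#   xy = np.array([[0., 1., 1., 0.], [0., 0., 1., 1.]])
#   xyz = _homogonize_pts(xy)
#   H = compute_homog(xyz, xyz)   # proportional to the identity matrix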
#
def _homogonize_pts(xy):
'Adds a 3rd dimension of ones to xy-position vectors'
assert xy.shape[0] == 2, ''
    xyz = vstack([xy, ones(xy.shape[1])])
return xyz
#
def _normalize_pts(xyz):
'Returns a transformation to normalize points to mean=0, stddev=1'
num_xyz = xyz.shape[1]
com = sum(xyz,axis=1) / num_xyz # center of mass
with catch_warnings():
simplefilter("ignore")
sx = num_xyz / sum(abs(xyz[0,:]-com[0])) # average xy magnitude
sy = num_xyz / sum(abs(xyz[1,:]-com[1]))
tx = -com[0]*sx
ty = -com[1]*sy
T = array([(sx, 0, tx), (0, sy, ty), (0, 0, 1)])
return T
#
def homogo_normalize_pts(xy):
    'Homogenize points for stable homography estimation'
xyz = _homogonize_pts(xy)
T = _normalize_pts(xyz)
xyz_norm = T.dot(xyz)
return (xyz_norm, T)
#
def get_affine_inliers_RANSAC(num_m, xy1_m, xy2_m,\
acd1_m, acd2_m, xy_thresh_sqrd, sigma_thresh_sqrd=None):
'''Computes initial inliers by iteratively computing affine transformations
between matched keypoints'''
aff_inliers = []
# Enumerate All Hypothesis (Match transformations)
for mx in xrange(num_m):
xy1 = xy1_m[:,mx].reshape(2,1) # XY Positions
xy2 = xy2_m[:,mx].reshape(2,1)
A1 = matrix(insert(acd1_m[:,mx], [1.], 0.)).reshape(2,2)
A2 = matrix(insert(acd2_m[:,mx], [1.], 0.)).reshape(2,2)
        # Compute Affine Transform
# from img1 to img2 = (E2\E1)
Aff = linalg.inv(A2).dot(A1)
#
# Transform XY-Positions
xy1_mAt = xy2 + Aff.dot( (xy1_m - xy1) )
xy_err_sqrd = sum( power(xy1_mAt - xy2_m, 2) , 0)
_inliers = find(xy_err_sqrd < xy_thresh_sqrd)
#
# Transform Ellipse Geometry (solved on paper)
        if sigma_thresh_sqrd is not None:
scale1_mAt = (acd1_m[0]*Aff[0,0]) *\
(acd1_m[1]*Aff[1,0]+acd1_m[2]*Aff[1,1])
scale2_m = acd2_m[0] * acd2_m[2]
scale_err = np.abs(scale1_mAt - scale2_m)
_inliers_scale = find(scale_err < sigma_thresh_sqrd)
            _inliers = np.intersect1d(_inliers, _inliers_scale)  # find() returns index arrays, so intersect them
        # If this hypothesis transformation is better than the ones we have
        # previously seen then set it as the best
if len(_inliers) > len(aff_inliers):
aff_inliers = _inliers
#bst_xy_err = xy_err_sqrd
return aff_inliers
def homog_warp_shape(H3x3, acd):
#acd = np.array([(1,2,3,4,5,6,7,8,9,0),(9,8,7,6,5,4,3,2,1,0),(9,1,8,2,7,3,0,5,4,6)])
#H3x3 = np.matrix('[1, 2, 3; 4, 5, 6; 7, 8, 9]')
num_shapes = acd.shape[1]
# Allocate Space for a return matrix and a stacked operation matrix
shape2x2 = np.empty((num_shapes, 2,2))
shape3x3 = np.zeros((num_shapes, 3,3))
# Fill the operation matrix, to do the multiply in one operation
shape3x3[:,2,2] = 1
shape3x3[:,0,0] = acd[0]
shape3x3[:,1,0] = acd[1]
shape3x3[:,1,1] = acd[2]
shape3x3.shape = (3*num_shapes, 3)
# Transform Stacked Matrix
H_shape3x3 = H3x3.dot(np.transpose(shape3x3)).getA()
# Insert Warped Shape components into return array
# This discards the transformation information (unsure if this is the right
# way to go about it)
shape2x2[:,0,0] = H_shape3x3[0,0::3]
shape2x2[:,0,1] = H_shape3x3[0,1::3]
shape2x2[:,1,0] = H_shape3x3[1,0::3]
shape2x2[:,1,1] = H_shape3x3[1,1::3]
# Return an array of 2x2 shapes
return shape2x2
def homog_warp(H3x3, xy):
xyz = _homogonize_pts(xy)
H_xyz = H3x3.dot(xyz)
with catch_warnings():
simplefilter("ignore")
H_xy = H_xyz[0:2,:] / H_xyz[2,:]
return H_xy
def ransac(fpts1_match, fpts2_match,\
xy_thresh_sqrd = None,\
sigma_thresh = None,\
theta_thresh = None):
    ''' RANSAC:
        Random Sample Consensus inlier generator
        - "Object retrieval with large vocabularies and fast spatial
          matching", Philbin, Chum, et al. '''
assert len(fpts1_match) == len(fpts2_match), 'RanSaC works on matches!'
num_m = fpts1_match.shape[1] # num matches
nInlier_thresh = 3
if num_m < nInlier_thresh:
# there are not enough matches to be spatially invalid
inliers = ones(num_m, dtype=uint32)
return inliers
#print('''
#RANSAC Resampling matches on %r chips
#* xy_thresh2 = %r * (chip diagonal length)
#* theta_thresh = %r (orientation)
#* sigma_thresh = %r (scale)''' % (num_m, xy_thresh_sqrd, theta_thresh, sigma_thresh))
if num_m == 0:
return zeros(num_m, dtype=uint32)
xy1_m = fpts1_match[0:2,:] # keypoint xy coordinates matches
xy2_m = fpts2_match[0:2,:]
acd1_m = fpts1_match[2:5,:] # keypoint shape matrix [a 0; c d] matches
acd2_m = fpts2_match[2:5,:]
# Compute affine inliers using exhaustive ransac
aff_inliers = get_affine_inliers_RANSAC(num_m, xy1_m, xy2_m,\
acd1_m, acd2_m, xy_thresh_sqrd, sigma_thresh_sqrd=None)
if len(aff_inliers) < nInlier_thresh:
# Cannot establish a better correspondence
return aff_inliers
    # Homogenize + normalize
(xyz_norm1, T1) = homogo_normalize_pts(xy1_m[:,aff_inliers])
(xyz_norm2, T2) = homogo_normalize_pts(xy2_m[:,aff_inliers])
# Compute Normalized Homog
try:
H_prime = compute_homog(xyz_norm1, xyz_norm2)
# Unnormalize
H = linalg.solve(T2, H_prime).dot(T1)
# Kpts1 in Kpts2-space
xy1_mHt = homog_warp(H, xy1_m)
# Final Inlier Errors
sqrd_dist_error = sum( (xy1_mHt - xy2_m)**2, axis=0)
inliers = sqrd_dist_error < xy_thresh_sqrd
except linalg.LinAlgError:
#logdbg('linalg.LinAlgError: Returning None')
return None
return inliers
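# Feature layout assumed by ransac() (from the slicing above): each column of
# fpts1_match / fpts2_match is one matched keypoint, with
#   rows 0-1: x, y position
#   rows 2-4: a, c, d of the shape matrix [a 0; c d]
# so both inputs are (5, num_matches) arrays.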
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API"""
import datetime
from nova import test
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'):
ctxt = context.get_admin_context()
network_ref = db.project_get_networks(ctxt,
'fake',
associate=True)[0]
vif = {'address': '56:12:12:12:12:12',
'network_id': network_ref['id'],
'instance_id': instance_id}
vif_ref = db.virtual_interface_create(ctxt, vif)
fixed_ip = {'address': ip,
'network_id': network_ref['id'],
'virtual_interface_id': vif_ref['id'],
'allocated': True,
'instance_id': instance_id}
db.fixed_ip_create(ctxt, fixed_ip)
fix_ref = db.fixed_ip_get_by_address(ctxt, ip)
db.floating_ip_create(ctxt, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']})
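# The helper above wires together the full chain exercised by the tests below:
# network -> virtual interface -> fixed IP (allocated) -> floating IP.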
class DbApiTestCase(test.TestCase):
def setUp(self):
super(DbApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_instance_get_all_by_filters(self):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
db.instance_create(self.context, args)
db.instance_create(self.context, args)
result = db.instance_get_all_by_filters(self.context, {})
        self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_deleted(self):
args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context.elevated(), inst1['id'])
result = db.instance_get_all_by_filters(self.context.elevated(), {})
self.assertEqual(2, len(result))
self.assertIn(inst1.id, [result[0].id, result[1].id])
self.assertIn(inst2.id, [result[0].id, result[1].id])
if inst1.id == result[0].id:
self.assertTrue(result[0].deleted)
else:
self.assertTrue(result[1].deleted)
def test_migration_get_all_unconfirmed(self):
ctxt = context.get_admin_context()
# Ensure no migrations are returned.
results = db.migration_get_all_unconfirmed(ctxt, 10)
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00)
values = {"status": "FINISHED", "updated_at": updated_at}
migration = db.migration_create(ctxt, values)
results = db.migration_get_all_unconfirmed(ctxt, 10)
self.assertEqual(1, len(results))
db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = datetime.datetime.utcnow()
values = {"status": "FINISHED", "updated_at": updated_at}
migration = db.migration_create(ctxt, values)
results = db.migration_get_all_unconfirmed(ctxt, 10)
self.assertEqual(0, len(results))
db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
def test_instance_get_all_hung_in_rebooting(self):
ctxt = context.get_admin_context()
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
self.assertEqual(0, len(results))
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00)
values = {"task_state": "rebooting", "updated_at": updated_at}
instance = db.instance_create(ctxt, values)
results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
self.assertEqual(1, len(results))
db.instance_update(ctxt, instance.id, {"task_state": None})
# Ensure the newly rebooted instance is not returned.
updated_at = datetime.datetime.utcnow()
values = {"task_state": "rebooting", "updated_at": updated_at}
instance = db.instance_create(ctxt, values)
results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
self.assertEqual(0, len(results))
db.instance_update(ctxt, instance.id, {"task_state": None})
def test_network_create_safe(self):
ctxt = context.get_admin_context()
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(ctxt, values)
self.assertNotEqual(None, network.uuid)
self.assertEqual(36, len(network.uuid))
db_network = db.network_get(ctxt, network.id)
self.assertEqual(network.uuid, db_network.uuid)
def test_network_create_with_duplicate_vlan(self):
ctxt = context.get_admin_context()
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
def test_instance_update_with_instance_id(self):
""" test instance_update() works when an instance id is passed """
ctxt = context.get_admin_context()
# Create an instance with some metadata
metadata = {'host': 'foo'}
values = {'metadata': metadata}
instance = db.instance_create(ctxt, values)
# Update the metadata
metadata = {'host': 'bar'}
values = {'metadata': metadata}
db.instance_update(ctxt, instance.id, values)
# Retrieve the metadata to ensure it was successfully updated
instance_meta = db.instance_metadata_get(ctxt, instance.id)
self.assertEqual('bar', instance_meta['host'])
def test_instance_update_with_instance_uuid(self):
""" test instance_update() works when an instance UUID is passed """
ctxt = context.get_admin_context()
# Create an instance with some metadata
metadata = {'host': 'foo'}
values = {'metadata': metadata}
instance = db.instance_create(ctxt, values)
# Update the metadata
metadata = {'host': 'bar'}
values = {'metadata': metadata}
db.instance_update(ctxt, instance.uuid, values)
# Retrieve the metadata to ensure it was successfully updated
instance_meta = db.instance_metadata_get(ctxt, instance.id)
self.assertEqual('bar', instance_meta['host'])
def test_instance_fault_create(self):
"""Ensure we can create an instance fault"""
ctxt = context.get_admin_context()
uuid = str(utils.gen_uuid())
# Create a fault
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': 404,
}
db.instance_fault_create(ctxt, fault_values)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
self.assertEqual(404, faults[uuid][0]['code'])
def test_instance_fault_get_by_instance(self):
""" ensure we can retrieve an instance fault by instance UUID """
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
uuids = [instance1['uuid'], instance2['uuid']]
# Create faults
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuids[0],
'code': 404,
}
fault1 = db.instance_fault_create(ctxt, fault_values)
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuids[0],
'code': 500,
}
fault2 = db.instance_fault_create(ctxt, fault_values)
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuids[1],
'code': 404,
}
fault3 = db.instance_fault_create(ctxt, fault_values)
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuids[1],
'code': 500,
}
fault4 = db.instance_fault_create(ctxt, fault_values)
instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
expected = {
uuids[0]: [fault2, fault1],
uuids[1]: [fault4, fault3],
}
self.assertEqual(instance_faults, expected)
def test_instance_faults_get_by_instance_uuids_no_faults(self):
"""None should be returned when no faults exist"""
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
uuids = [instance1['uuid'], instance2['uuid']]
instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
expected = {uuids[0]: [], uuids[1]: []}
self.assertEqual(expected, instance_faults)
def test_dns_registration(self):
domain1 = 'test.domain.one'
domain2 = 'test.domain.two'
testzone = 'testzone'
ctxt = context.get_admin_context()
db.dnsdomain_register_for_zone(ctxt, domain1, testzone)
domain_ref = db.dnsdomain_get(ctxt, domain1)
zone = domain_ref.availability_zone
scope = domain_ref.scope
self.assertEqual(scope, 'private')
self.assertEqual(zone, testzone)
db.dnsdomain_register_for_project(ctxt, domain2,
self.project_id)
domain_ref = db.dnsdomain_get(ctxt, domain2)
project = domain_ref.project_id
scope = domain_ref.scope
self.assertEqual(project, self.project_id)
self.assertEqual(scope, 'public')
db.dnsdomain_unregister(ctxt, domain1)
db.dnsdomain_unregister(ctxt, domain2)
def test_network_get_associated_fixed_ips(self):
ctxt = context.get_admin_context()
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(ctxt, values)
values = {'address': 'bar', 'instance_id': instance['id']}
vif = db.virtual_interface_create(ctxt, values)
values = {'address': 'baz',
'network_id': 1,
'allocated': True,
'instance_id': instance['id'],
'virtual_interface_id': vif['id']}
fixed_address = db.fixed_ip_create(ctxt, values)
data = db.network_get_associated_fixed_ips(ctxt, 1)
self.assertEqual(len(data), 1)
record = data[0]
self.assertEqual(record['address'], fixed_address)
self.assertEqual(record['instance_id'], instance['id'])
self.assertEqual(record['network_id'], 1)
self.assertEqual(record['instance_created'], instance['created_at'])
self.assertEqual(record['instance_updated'], instance['updated_at'])
self.assertEqual(record['instance_hostname'], instance['hostname'])
self.assertEqual(record['vif_id'], vif['id'])
self.assertEqual(record['vif_address'], vif['address'])
data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing')
self.assertEqual(len(data), 0)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate',
'availability_zone': 'fake_avail_zone', }
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result.id, host)
return result
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create(self):
"""Ensure aggregate can be created with no metadata."""
result = _create_aggregate(metadata=None)
self.assertEqual(result['operational_state'], 'created')
def test_aggregate_create_avoid_name_conflict(self):
"""Test we can avoid conflict on deleted aggregates."""
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1.id)
values = {'name': r1.name, 'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values)
self.assertEqual(r2.name, values['name'])
self.assertEqual(r2.availability_zone, values['availability_zone'])
self.assertEqual(r2.operational_state, "created")
def test_aggregate_create_raise_exist_exc(self):
"""Ensure aggregate names are distinct."""
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
"""Ensure AggregateNotFound is raised when getting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
"""Ensure AggregateNotFound is raised when getting metadata."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
"""Ensure aggregate can be created with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
def test_aggregate_create_low_privi_context(self):
"""Ensure right context is applied when creating aggregate."""
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
"""Ensure we can get aggregate with all its relations."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result.id)
self.assertEqual(_get_fake_aggr_hosts(), expected.hosts)
self.assertEqual(_get_fake_aggr_metadata(), expected.metadetails)
def test_aggregate_get_by_host(self):
"""Ensure we can get an aggregate by host."""
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt)
r2 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1.id, r2.id)
def test_aggregate_get_by_host_not_found(self):
"""Ensure AggregateHostNotFound is raised with unknown host."""
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_get_by_host, ctxt, 'unknown_host')
def test_aggregate_delete_raise_not_found(self):
"""Ensure AggregateNotFound is raised when deleting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
"""Ensure we can delete an aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt, read_deleted='no')
self.assertEqual(0, len(expected))
ctxt = context.get_admin_context(read_deleted='yes')
aggregate = db.aggregate_get(ctxt, result['id'])
self.assertEqual(aggregate["operational_state"], "dismissed")
def test_aggregate_update(self):
"""Ensure an aggregate can be updated."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, result.id, new_values)
self.assertNotEqual(result.availability_zone,
updated.availability_zone)
def test_aggregate_update_with_metadata(self):
"""Ensure an aggregate can be updated with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
db.aggregate_update(ctxt, result.id, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(_get_fake_aggr_metadata(), expected)
def test_aggregate_update_with_existing_metadata(self):
"""Ensure an aggregate can be updated with existing metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, result.id, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(values['metadata'], expected)
def test_aggregate_update_raise_not_found(self):
"""Ensure AggregateNotFound is raised when updating an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
"""Ensure we can get all aggregates."""
ctxt = context.get_admin_context()
counter = 3
for c in xrange(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c,
'availability_zone': 'fake_avail_zone'},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
"""Ensure we get only non-deleted aggregates."""
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in xrange(add_counter):
values = {'name': 'fake_aggregate_%d' % c,
'availability_zone': 'fake_avail_zone'}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in xrange(remove_counter):
db.aggregate_delete(ctxt, aggregates[c].id)
results = db.aggregate_get_all(ctxt, read_deleted='no')
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
"""Ensure we can add metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result.id, metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_update(self):
"""Ensure we can update metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
db.aggregate_metadata_delete(ctxt, result.id, key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result.id, new_metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
metadata[key] = 'foo'
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_delete(self):
"""Ensure we can delete metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result.id, metadata)
db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result.id)
del metadata[metadata.keys()[0]]
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_delete_raise_not_found(self):
"""Ensure AggregateMetadataNotFound is raised when deleting."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result.id, 'foo_key')
def test_aggregate_host_add(self):
"""Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result.id)
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_add_deleted(self):
"""Ensure we can add a host that was previously deleted."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result.id, host)
db.aggregate_host_add(ctxt, result.id, host)
expected = db.aggregate_host_get_all(ctxt, result.id,
read_deleted='no')
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_raise_conflict(self):
"""Ensure we cannot add host to distinct aggregates."""
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostConflict,
_create_aggregate_with_hosts, ctxt,
values={'name': 'fake_aggregate2',
'availability_zone': 'fake_avail_zone2', },
metadata=None)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
"""Ensure we cannot add host to the same aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result.id, _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
"""Ensure AggregateFound when adding a host."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
host = _get_fake_aggr_hosts()[0]
self.assertRaises(exception.AggregateNotFound,
db.aggregate_host_add,
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
"""Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result.id,
_get_fake_aggr_hosts()[0])
expected = db.aggregate_host_get_all(ctxt, result.id,
read_deleted='no')
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
"""Ensure AggregateHostNotFound is raised when deleting a host."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result.id, _get_fake_aggr_hosts()[0])
class CapacityTestCase(test.TestCase):
def setUp(self):
super(CapacityTestCase, self).setUp()
self.ctxt = context.get_admin_context()
service_dict = dict(host='host1', binary='binary1',
topic='compute', report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
service_id=self.service.id)
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
def _create_helper(self, host):
self.compute_node_dict['host'] = host
return db.compute_node_create(self.ctxt, self.compute_node_dict)
def test_compute_node_create(self):
item = self._create_helper('host1')
self.assertEquals(item.free_ram_mb, 1024)
self.assertEquals(item.free_disk_gb, 2048)
self.assertEquals(item.running_vms, 0)
self.assertEquals(item.current_workload, 0)
def test_compute_node_create_with_reservations(self):
self.flags(reserved_host_memory_mb=256)
item = self._create_helper('host1')
self.assertEquals(item.free_ram_mb, 1024 - 256)
def test_compute_node_set(self):
self._create_helper('host1')
x = db.compute_node_utilization_set(self.ctxt, 'host1',
free_ram_mb=2048, free_disk_gb=4096)
self.assertEquals(x.free_ram_mb, 2048)
self.assertEquals(x.free_disk_gb, 4096)
self.assertEquals(x.running_vms, 0)
self.assertEquals(x.current_workload, 0)
x = db.compute_node_utilization_set(self.ctxt, 'host1', work=3)
self.assertEquals(x.free_ram_mb, 2048)
self.assertEquals(x.free_disk_gb, 4096)
self.assertEquals(x.current_workload, 3)
self.assertEquals(x.running_vms, 0)
x = db.compute_node_utilization_set(self.ctxt, 'host1', vms=5)
self.assertEquals(x.free_ram_mb, 2048)
self.assertEquals(x.free_disk_gb, 4096)
self.assertEquals(x.current_workload, 3)
self.assertEquals(x.running_vms, 5)
def test_compute_node_utilization_update(self):
self._create_helper('host1')
x = db.compute_node_utilization_update(self.ctxt, 'host1',
free_ram_mb_delta=-24)
self.assertEquals(x.free_ram_mb, 1000)
self.assertEquals(x.free_disk_gb, 2048)
self.assertEquals(x.running_vms, 0)
self.assertEquals(x.current_workload, 0)
x = db.compute_node_utilization_update(self.ctxt, 'host1',
free_disk_gb_delta=-48)
self.assertEquals(x.free_ram_mb, 1000)
self.assertEquals(x.free_disk_gb, 2000)
self.assertEquals(x.running_vms, 0)
self.assertEquals(x.current_workload, 0)
x = db.compute_node_utilization_update(self.ctxt, 'host1',
work_delta=3)
self.assertEquals(x.free_ram_mb, 1000)
self.assertEquals(x.free_disk_gb, 2000)
self.assertEquals(x.current_workload, 3)
self.assertEquals(x.running_vms, 0)
x = db.compute_node_utilization_update(self.ctxt, 'host1',
work_delta=-1)
self.assertEquals(x.free_ram_mb, 1000)
self.assertEquals(x.free_disk_gb, 2000)
self.assertEquals(x.current_workload, 2)
self.assertEquals(x.running_vms, 0)
x = db.compute_node_utilization_update(self.ctxt, 'host1',
vm_delta=5)
self.assertEquals(x.free_ram_mb, 1000)
self.assertEquals(x.free_disk_gb, 2000)
self.assertEquals(x.current_workload, 2)
self.assertEquals(x.running_vms, 5)
class TestIpAllocation(test.TestCase):
def setUp(self):
super(TestIpAllocation, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
self.network = db.network_create_safe(self.ctxt, {})
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, None)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
address = self.create_fixed_ip(instance_id=self.instance.id)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, self.instance.id)
def test_fixed_ip_associate_succeeds(self):
address = self.create_fixed_ip(network_id=self.network.id)
db.fixed_ip_associate(self.ctxt, address, self.instance.id,
network_id=self.network.id)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip.instance_id, self.instance.id)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, self.instance.id,
network_id=self.network.id)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip.instance_id, self.instance.id)
self.assertEqual(fixed_ip.network_id, self.network.id)
|
|
from django.db import models
from django.contrib.auth.models import User
import calendar
import ipaddress
import uuid
from django.core.exceptions import ValidationError
import string
from Crypto.PublicKey import RSA
from uwsgi_it_api.config import UWSGI_IT_BASE_UID
import random
import datetime
from django.db.models.signals import post_delete
def generate_uuid():
return str(uuid.uuid4())
def generate_rsa():
return RSA.generate(2048).exportKey()
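# With PyCrypto, RSA.generate(2048).exportKey() returns a PEM-encoded private
# key ('-----BEGIN RSA PRIVATE KEY-----' ...), so the rsa_key_lines and
# rsa_pubkey properties below can split and derive from that text directly.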
class Customer(models.Model):
user = models.OneToOneField(User)
vat = models.CharField(max_length=255,blank=True,null=True)
company = models.CharField(max_length=255,blank=True,null=True)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
rsa_key = models.TextField(default=generate_rsa, unique=True)
admin_note = models.TextField(blank=True,null=True)
@property
def rsa_key_lines(self):
return self.rsa_key.split('\n')
@property
def rsa_pubkey(self):
return RSA.importKey(self.rsa_key).publickey().exportKey()
@property
def rsa_pubkey_lines(self):
return self.rsa_pubkey.split('\n')
def __unicode__(self):
return self.user.username
class CustomerAttribute(models.Model):
customer = models.ForeignKey(Customer)
namespace = models.CharField(max_length=255)
key = models.CharField(max_length=255)
value = models.TextField(blank=True)
class Meta:
unique_together = ( 'customer', 'namespace', 'key')
class Datacenter(models.Model):
name = models.CharField(max_length=255,unique=True)
description = models.TextField(blank=True,null=True)
note = models.TextField(blank=True,null=True)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class Server(models.Model):
name = models.CharField(max_length=255,unique=True)
address = models.GenericIPAddressField()
hd = models.CharField(max_length=255)
memory = models.PositiveIntegerField("Memory MB")
storage = models.PositiveIntegerField("Storage MB")
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
etc_resolv_conf = models.TextField("/etc/resolv.conf", default='',blank=True)
etc_hosts = models.TextField("/etc/hosts", default='',blank=True)
weight = models.PositiveIntegerField(default=9999)
datacenter = models.ForeignKey('Datacenter',null=True,blank=True)
note = models.TextField(blank=True,null=True)
owner = models.ForeignKey(Customer,null=True,blank=True)
ssd = models.BooleanField('SSD', default=False)
portmappings_mtime = models.DateTimeField(auto_now=True)
@property
def used_memory(self):
n = self.container_set.all().aggregate(models.Sum('memory'))['memory__sum']
if not n: return 0
return n
@property
def used_storage(self):
n = self.container_set.all().aggregate(models.Sum('storage'))['storage__sum']
if not n: return 0
return n
@property
def free_memory(self):
return self.memory - self.used_memory
@property
def free_storage(self):
return self.storage - self.used_storage
def __unicode__(self):
features = []
if self.ssd:
features.append('SSD')
if self.owner:
features.append('dedicated')
space = ''
if features:
space = ' '
return "%s - %s%s%s" % (self.name, self.address, space, ','.join(features))
@property
def etc_resolv_conf_lines(self):
return self.etc_resolv_conf.replace('\r', '\n').replace('\n\n', '\n').split('\n')
@property
def etc_hosts_lines(self):
return self.etc_hosts.replace('\r', '\n').replace('\n\n', '\n').split('\n')
@property
def munix(self):
return calendar.timegm(self.mtime.utctimetuple())
@property
def portmappings_munix(self):
return calendar.timegm(self.portmappings_mtime.utctimetuple())
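# calendar.timegm() treats the naive mtime as UTC, so the munix properties
# expose modification times as integer unix seconds, e.g.
# calendar.timegm(datetime.datetime(1970, 1, 1, 0, 0, 1).utctimetuple()) == 1.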
class ServerFileMetadata(models.Model):
filename = models.CharField(max_length=255,unique=True)
def __unicode__(self):
return self.filename
class ServerMetadata(models.Model):
server = models.ForeignKey(Server)
metadata = models.ForeignKey(ServerFileMetadata)
value = models.TextField(blank=True,null=True)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
def __unicode__(self):
return "%s - %s " % (self.server.name, self.metadata.filename)
class Meta:
unique_together = ( 'server', 'metadata')
class Legion(models.Model):
name = models.CharField(max_length=255,unique=True)
address = models.GenericIPAddressField()
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
note = models.TextField(blank=True,null=True)
customer = models.ForeignKey(Customer,null=True,blank=True)
key = models.CharField(max_length=64)
nodes = models.ManyToManyField(Server, through='LegionNode')
quorum = models.PositiveIntegerField(default=0)
def __unicode__(self):
return "%s - %s " % (self.name, self.address)
class LegionNode(models.Model):
legion = models.ForeignKey(Legion)
server = models.ForeignKey(Server)
weight = models.PositiveIntegerField(default=9999)
def __unicode__(self):
return "%s on %s " % (self.server, self.legion)
class FloatingAddress(models.Model):
address = models.GenericIPAddressField()
customer = models.ForeignKey(Customer,null=True,blank=True)
legion = models.ForeignKey(Legion,null=True,blank=True)
mapped_to_server = models.ForeignKey(Server,null=True,blank=True)
note = models.TextField(blank=True,null=True)
def __unicode__(self):
return "%s - %s" % (self.address, self.mapped_to_server)
class Meta:
verbose_name_plural = 'Floating Addresses'
class Distro(models.Model):
name = models.CharField(max_length=255,unique=True)
path = models.CharField(max_length=255,unique=True)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
note = models.TextField(blank=True,null=True)
def __unicode__(self):
return self.name
class CustomDistro(models.Model):
container = models.ForeignKey('Container')
name = models.CharField(max_length=255)
path = models.CharField(max_length=255)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
note = models.TextField(blank=True,null=True)
tags = models.ManyToManyField('Tag', blank=True)
def __unicode__(self):
return self.name
def clean(self):
allowed = string.ascii_letters + string.digits + '._-'
for letter in self.path:
if letter not in allowed:
raise ValidationError('invalid path for custom distro, it can contain only "%s"' % allowed)
class Meta:
unique_together = (('container', 'name'), ('container', 'path'))
def start_of_epoch():
return datetime.datetime.fromtimestamp(1)
class Container(models.Model):
name = models.CharField(max_length=255)
ssh_keys_raw = models.TextField("SSH keys", blank=True,null=True)
distro = models.ForeignKey(Distro,null=True,blank=True)
server = models.ForeignKey(Server)
# in megabytes
memory = models.PositiveIntegerField("Memory MB")
storage = models.PositiveIntegerField("Storage MB")
customer = models.ForeignKey(Customer)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
jid = models.CharField(max_length=255,blank=True,null=True)
jid_secret = models.CharField(max_length=255,blank=True,null=True)
jid_destinations = models.CharField(max_length=255,blank=True,null=True)
pushover_user = models.CharField(max_length=255,blank=True,null=True)
pushover_token = models.CharField(max_length=255,blank=True,null=True)
pushover_sound = models.CharField(max_length=255,blank=True,null=True)
pushbullet_token = models.CharField(max_length=255,blank=True,null=True)
slack_webhook = models.CharField(max_length=255,blank=True,null=True)
quota_threshold = models.PositiveIntegerField("Quota threshold", default=90)
tags = models.ManyToManyField('Tag', blank=True)
nofollow = models.BooleanField(default=False)
note = models.TextField(blank=True,null=True)
accounted = models.BooleanField(default=False)
last_reboot = models.DateTimeField(default=start_of_epoch)
ssh_keys_mtime = models.DateTimeField(default=start_of_epoch)
max_alarms = models.PositiveIntegerField(default=100)
alarm_key = models.CharField(max_length=36, null=True, blank=True)
alarm_freq = models.PositiveIntegerField(default=60)
custom_distros_storage = models.BooleanField(default=False)
custom_distro = models.ForeignKey(CustomDistro,null=True,blank=True,related_name='+')
admin_note = models.TextField(blank=True,null=True)
admin_order = models.CharField(max_length=255,blank=True,null=True)
def __unicode__(self):
return "%d (%s)" % (self.uid, self.name)
# do not allow over-allocate memory or storage
def clean(self):
if self.alarm_freq < 60:
self.alarm_freq = 60
# hack for an unset server value: accessing self.server raises when the
# foreign key has not been assigned yet
try:
if self.server is None: return
except Exception:
return
current_storage = self.server.container_set.all().aggregate(models.Sum('storage'))['storage__sum']
current_memory = self.server.container_set.all().aggregate(models.Sum('memory'))['memory__sum']
if not current_storage: current_storage = 0
if not current_memory: current_memory = 0
if self.pk:
orig = Container.objects.get(pk=self.pk)
current_storage -= orig.storage
current_memory -= orig.memory
if current_storage+self.storage > self.server.storage:
raise ValidationError('the requested storage size is not available on the specified server')
if current_memory+self.memory > self.server.memory:
raise ValidationError('the requested memory size is not available on the specified server')
# force a reboot if required
def save(self, *args, **kwargs):
interesting_fields = ('name',
'distro',
'server',
'memory',
'storage',
'customer',
'alarm_freq',
'jid',
'jid_secret',
'jid_destinations',
'pushover_user',
'pushover_token',
'pushover_sound',
'pushbullet_token',
'slack_webhook',
'quota_threshold',
'custom_distros_storage',
'custom_distro',
'nofollow')
if self.pk is not None:
orig = Container.objects.get(pk=self.pk)
set_reboot = False
for field in interesting_fields:
if getattr(self, field) != getattr(orig, field):
set_reboot = True
break
if set_reboot:
self.last_reboot = datetime.datetime.now()
if self.ssh_keys_raw != orig.ssh_keys_raw:
self.ssh_keys_mtime = datetime.datetime.now()
super(Container, self).save(*args, **kwargs)
@property
def combo_alarms(self):
alarms = []
if self.pushover_user and self.pushover_token:
alarms.append('pushover')
if self.pushbullet_token:
alarms.append('pushbullet')
if self.slack_webhook:
alarms.append('slack')
if self.jid and self.jid_secret and self.jid_destinations:
alarms.append('xmpp')
return ','.join(alarms)
@property
def rand_pid(self):
return random.randrange(1, 32768)
@property
def uid(self):
return UWSGI_IT_BASE_UID+self.pk
@property
def hostname(self):
h = ''
allowed = string.ascii_letters + string.digits + '-'
for char in self.name:
if char in allowed:
h += char
else:
h += '-'
return h
@property
def ip(self):
# skip the first two addresses (10.0.0.1 for the gateway, 10.0.0.2 for the api)
addr = self.pk + 2
addr0 = 0x0a000000
return ipaddress.IPv4Address(addr0 | (addr & 0x00ffffff))
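# Worked example (illustrative pk, not from the source): a container with
# pk == 1 gets addr == 3, and 0x0a000000 | (3 & 0x00ffffff) is
# IPv4Address('10.0.0.3'); the 0x00ffffff mask keeps the per-container
# offset inside the 10.0.0.0/8 block.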
@property
def munix(self):
return calendar.timegm(self.last_reboot.utctimetuple())
@property
def ssh_keys_munix(self):
return calendar.timegm(self.ssh_keys_mtime.utctimetuple())
@property
def ssh_keys(self):
# try to generate a clean list of ssh keys
if not self.ssh_keys_raw: return []
cleaned = self.ssh_keys_raw.replace('\r', '\n').replace('\n\n', '\n')
return cleaned.split('\n')
@property
def quota(self):
return self.storage * (1024*1024)
@property
def memory_limit_in_bytes(self):
return self.memory * (1024*1024)
@property
def links(self):
l = []
for link in self.containerlink_set.all():
direction_in = {'direction': 'in', 'src': link.to.ip, 'src_mask': 32, 'dst': link.container.ip, 'dst_mask': 32, 'action': 'allow', 'target': ''}
direction_out = {'direction': 'out','src': link.container.ip, 'src_mask': 32, 'dst': link.to.ip, 'dst_mask': 32, 'action': 'allow', 'target': ''}
if link.container.server != link.to.server:
direction_out['action'] = 'gateway'
direction_out['target'] = "%s:999" % link.to.server.address
l.append(direction_in)
l.append(direction_out)
return l
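# Illustrative output (hypothetical addresses): for a link from container
# 10.0.0.3 to container 10.0.0.4 whose server lives at 192.168.1.2, the
# property yields two rules:
#   in:  src 10.0.0.4/32 -> dst 10.0.0.3/32, action 'allow'
#   out: src 10.0.0.3/32 -> dst 10.0.0.4/32, action 'gateway',
#        target '192.168.1.2:999'
# i.e. inbound traffic is allowed directly, while outbound traffic to a
# container on another server is forwarded through that server on port 999.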
@property
def linked_to(self):
return [l.to.uid for l in self.containerlink_set.all()]
class ContainerLink(models.Model):
container = models.ForeignKey(Container)
to = models.ForeignKey(Container,related_name='+')
def __unicode__(self):
return "%s --> %s" % (self.container, self.to)
class Meta:
unique_together = ( 'container', 'to')
def clean(self):
if self.container == self.to:
raise ValidationError("cannot link with myself")
class Portmap(models.Model):
proto = models.CharField(max_length=4, choices=(('tcp', 'tcp'), ('udp', 'udp')))
public_port = models.PositiveIntegerField()
container = models.ForeignKey(Container)
private_port = models.PositiveIntegerField()
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
def clean(self):
if self.public_port < 1024 or self.public_port > 65535:
raise ValidationError("invalid public port range")
if self.public_port in (1999, 3022, 3026):
raise ValidationError("invalid public port range")
if self.private_port < 1024 or self.private_port > 65535:
raise ValidationError("invalid private port range")
@property
def munix(self):
return calendar.timegm(self.mtime.utctimetuple())
class Meta:
verbose_name_plural = 'Port Mapping'
unique_together = (('proto', 'public_port', 'container'), ('proto', 'private_port', 'container'))
def portmap_post_delete_handler(sender, instance, **kwargs):
# use a queryset update() so Server.save() is bypassed: the server's
# auto_now mtime stays untouched and only portmappings_mtime is refreshed
Server.objects.filter(pk=instance.container.server.pk).update(portmappings_mtime=datetime.datetime.now())
post_delete.connect(portmap_post_delete_handler, Portmap)
class Loopbox(models.Model):
container = models.ForeignKey(Container)
filename = models.CharField(max_length=64)
mountpoint = models.CharField(max_length=255)
ro = models.BooleanField(default=False)
tags = models.ManyToManyField('Tag', blank=True)
def clean(self):
checks = ('..', './', '/.', '//')
starts = ('/',)
ends = ('/',)
equals = ('etc', 'logs', 'run', 'tmp', 'vassals')
for check in checks:
if check in self.filename:
raise ValidationError("invalid filename")
if check in self.mountpoint:
raise ValidationError("invalid mountpoint")
for start in starts:
if self.filename.startswith(start):
raise ValidationError("invalid filename")
if self.mountpoint.startswith(start):
raise ValidationError("invalid mountpoint")
for end in ends:
if self.filename.endswith(end):
raise ValidationError("invalid filename")
if self.mountpoint.endswith(end):
raise ValidationError("invalid mountpoint")
for equal in equals:
if self.filename == equal:
raise ValidationError("invalid filename")
if self.mountpoint == equal:
raise ValidationError("invalid mountpoint")
class Meta:
verbose_name_plural = 'Loopboxes'
unique_together = (('container', 'filename'), ('container', 'mountpoint'))
class Alarm(models.Model):
container = models.ForeignKey(Container)
unix = models.DateTimeField()
level = models.PositiveIntegerField(choices=((0,'system'), (1, 'user'), (2, 'exception'), (3, 'traceback'), (4, 'log')))
# in the format #xxxxxx
color = models.CharField(max_length=7, default='#ffffff')
msg = models.TextField()
line = models.PositiveIntegerField(null=True, blank=True)
func = models.CharField(max_length=255, null=True, blank=True)
filename = models.CharField(max_length=255, null=True, blank=True)
_class = models.CharField('class', max_length=255,blank=True,null=True)
vassal = models.CharField(max_length=255,blank=True,null=True)
def save(self, *args, **kwargs):
if len(self.color) != 7:
raise ValidationError('invalid color')
if not self.color.startswith('#'):
raise ValidationError('invalid color')
# how many alarms ?
alarms = self.container.alarm_set.count()
if alarms + 1 > self.container.max_alarms:
oldest = self.container.alarm_set.all().order_by('unix')[0]
oldest.delete()
super(Alarm, self).save(*args, **kwargs)
class Meta:
ordering = ['-unix']
"""
Domains are mapped to customers; each of the customer's containers
can subscribe to them.
"""
class Domain(models.Model):
name = models.CharField(max_length=255,unique=True)
customer = models.ForeignKey(Customer)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=36, default=generate_uuid,unique=True)
note = models.TextField(blank=True,null=True)
tags = models.ManyToManyField('Tag', blank=True)
def __unicode__(self):
return self.name
@property
def munix(self):
return calendar.timegm(self.mtime.utctimetuple())
class Tag(models.Model):
name = models.CharField(max_length=255)
customer = models.ForeignKey(Customer)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
note = models.TextField(blank=True,null=True)
def __unicode__(self):
return self.name
class Meta:
unique_together = ('name', 'customer')
class News(models.Model):
content = models.TextField()
public = models.BooleanField(default=False)
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-ctime']
verbose_name_plural = 'News'
"""
Pretty low-level model for storing customer configurations outside of
the container concept (like rawrouter services or https non-SNI proxies)
"""
class CustomService(models.Model):
name = models.CharField(max_length=255,unique=True)
customer = models.ForeignKey(Customer)
server = models.ForeignKey(Server)
config = models.TextField()
ctime = models.DateTimeField(auto_now_add=True)
mtime = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
@property
def munix(self):
return calendar.timegm(self.mtime.utctimetuple())
"""
each metric is stored in a different table
"""
class ContainerMetric(models.Model):
container = models.ForeignKey(Container)
year = models.PositiveIntegerField(null=True)
month = models.PositiveIntegerField(null=True)
day = models.PositiveIntegerField(null=True)
# this is a blob containing raw metrics
json = models.TextField(null=True)
def __unicode__(self):
return "%s-%s-%s" % (self.year, self.month, self.day)
class Meta:
abstract = True
unique_together = ('container', 'year', 'month', 'day')
class DomainMetric(models.Model):
domain = models.ForeignKey(Domain)
container = models.ForeignKey(Container)
year = models.PositiveIntegerField(null=True)
month = models.PositiveIntegerField(null=True)
day = models.PositiveIntegerField(null=True)
# this is a blob containing raw metrics
json = models.TextField(null=True)
def __unicode__(self):
return "%s-%s-%s" % (self.year, self.month, self.day)
class Meta:
abstract = True
unique_together = ('domain', 'container', 'year', 'month', 'day')
"""
real metrics now
"""
# stores values from the tuntap router
class NetworkRXContainerMetric(ContainerMetric):
pass
# stores values from the tuntap router
class NetworkTXContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class CPUContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class MemoryContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class MemoryRSSContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class MemoryCacheContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class IOReadContainerMetric(ContainerMetric):
pass
# stores values from the container cgroup
class IOWriteContainerMetric(ContainerMetric):
pass
# uses perl Quota package
class QuotaContainerMetric(ContainerMetric):
pass
class HitsDomainMetric(DomainMetric):
pass
class NetworkRXDomainMetric(DomainMetric):
pass
class NetworkTXDomainMetric(DomainMetric):
pass
|
|
from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Optional, Set, Tuple, Text
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
from django.http import HttpResponse
import django.contrib.auth
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib.actions import do_create_user
from zerver.models import UserProfile, Realm, get_user_profile_by_id, \
get_user_profile_by_email, remote_user_to_email, email_to_username, \
get_realm_by_email_domain
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
from social.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
from social.exceptions import AuthFailed
from django.contrib.auth import authenticate
from zerver.lib.utils import check_subdomain, get_subdomain
def pad_method_dict(method_dict):
# type: (Dict[Text, bool]) -> Dict[Text, bool]
"""Pads an authentication methods dict to contain all auth backends
supported by the software, regardless of whether they are
configured on this server"""
for key in AUTH_BACKEND_NAME_MAP:
if key not in method_dict:
method_dict[key] = False
return method_dict
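# Worked example (illustrative subset of AUTH_BACKEND_NAME_MAP): if the map
# contained only u'Email' and u'GitHub', pad_method_dict({u'Email': True})
# would return {u'Email': True, u'GitHub': False}, letting callers treat the
# dict as exhaustive over all known backends.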
def auth_enabled_helper(backends_to_check, realm):
# type: (List[Text], Optional[Realm]) -> bool
if realm is not None:
enabled_method_dict = realm.authentication_methods_dict()
pad_method_dict(enabled_method_dict)
else:
enabled_method_dict = dict((method, True) for method in Realm.AUTHENTICATION_FLAGS)
pad_method_dict(enabled_method_dict)
for supported_backend in django.contrib.auth.get_backends():
for backend_name in backends_to_check:
backend = AUTH_BACKEND_NAME_MAP[backend_name]
if enabled_method_dict[backend_name] and isinstance(supported_backend, backend):
return True
return False
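# Usage sketch (mirrors the convenience wrappers below): a call such as
# auth_enabled_helper([u'Email', u'LDAP'], realm) returns True only when at
# least one of the named backends is both enabled in the realm's
# authentication methods and actually loaded via
# settings.AUTHENTICATION_BACKENDS.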
def ldap_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'LDAP'], realm)
def email_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Email'], realm)
def password_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return ldap_auth_enabled(realm) or email_auth_enabled(realm)
def dev_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Dev'], realm)
def google_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Google'], realm)
def github_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'GitHub'], realm)
def common_get_active_user_by_email(email, return_data=None):
# type: (Text, Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return None
if not user_profile.is_active:
if return_data is not None:
return_data['inactive_user'] = True
return None
if user_profile.realm.deactivated:
if return_data is not None:
return_data['inactive_realm'] = True
return None
return user_profile
class ZulipAuthMixin(object):
def get_user(self, user_profile_id):
# type: (int) -> Optional[UserProfile]
""" Get a UserProfile object from the user_profile_id. """
try:
return get_user_profile_by_id(user_profile_id)
except UserProfile.DoesNotExist:
return None
class SocialAuthMixin(ZulipAuthMixin):
auth_backend_name = None # type: Text
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def authenticate(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[UserProfile]
return_data = kwargs.get('return_data', {})
email_address = self.get_email_address(*args, **kwargs)
if not email_address:
return None
try:
user_profile = get_user_profile_by_email(email_address)
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(kwargs.get("realm_subdomain"),
user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not auth_enabled_helper([self.auth_backend_name], user_profile.realm):
return_data["auth_backend_disabled"] = True
return None
return user_profile
def process_do_auth(self, user_profile, *args, **kwargs):
# type: (UserProfile, *Any, **Any) -> Optional[HttpResponse]
# This function needs to be imported from here due to the cyclic
# dependency.
from zerver.views.auth import (login_or_register_remote_user,
redirect_to_subdomain_login_url)
from zerver.views.registration import redirect_and_log_into_subdomain
return_data = kwargs.get('return_data', {})
inactive_user = return_data.get('inactive_user')
inactive_realm = return_data.get('inactive_realm')
invalid_subdomain = return_data.get('invalid_subdomain')
if inactive_user or inactive_realm:
return None
strategy = self.strategy # type: ignore # This comes from Python Social Auth.
request = strategy.request
email_address = self.get_email_address(*args, **kwargs)
full_name = self.get_full_name(*args, **kwargs)
subdomain = strategy.session_get('subdomain')
if not subdomain:
return login_or_register_remote_user(request, email_address,
user_profile, full_name,
bool(invalid_subdomain))
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist:
return redirect_to_subdomain_login_url()
return redirect_and_log_into_subdomain(realm, full_name, email_address)
class ZulipDummyBackend(ZulipAuthMixin):
"""
Used when we want to log you in but we don't know which backend to use.
"""
def authenticate(self, username=None, realm_subdomain=None, use_dummy_backend=False,
return_data=None):
# type: (Optional[Text], Optional[Text], bool, Optional[Dict[str, Any]]) -> Optional[UserProfile]
if use_dummy_backend:
user_profile = common_get_active_user_by_email(username)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
if return_data is not None:
return_data["invalid_subdomain"] = True
return None
return user_profile
return None
class EmailAuthBackend(ZulipAuthMixin):
"""
Email Authentication Backend
Allows a user to sign in using an email/password pair rather than
a username/password pair.
"""
def authenticate(self, username=None, password=None, realm_subdomain=None, return_data=None):
# type: (Optional[Text], Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
""" Authenticate a user based on email address as the user name. """
if username is None or password is None:
# Return immediately. Otherwise we will look for a SQL row with
# NULL username. While that's probably harmless, it's needless
# exposure.
return None
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not password_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['password_auth_disabled'] = True
return None
if not email_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['email_auth_disabled'] = True
return None
if user_profile.check_password(password):
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
return user_profile
return None
class GoogleMobileOauth2Backend(ZulipAuthMixin):
"""
Google Apps authentication for mobile devices
Allows a user to sign in using a Google-issued OAuth2 token.
Ref:
https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
"""
def authenticate(self, google_oauth2_token=None, realm_subdomain=None, return_data=None):
# type: (Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
if return_data is None:
return_data = {}
try:
token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
except AppIdentityError:
return None
if token_payload["email_verified"] in (True, "true"):
try:
user_profile = get_user_profile_by_email(token_payload["email"])
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not google_auth_enabled(realm=user_profile.realm):
return_data["google_auth_disabled"] = True
return None
return user_profile
else:
return_data["valid_attestation"] = False
class ZulipRemoteUserBackend(RemoteUserBackend):
create_unknown_user = False
def authenticate(self, remote_user, realm_subdomain=None):
# type: (str, Optional[Text]) -> Optional[UserProfile]
if not remote_user:
return None
email = remote_user_to_email(remote_user)
user_profile = common_get_active_user_by_email(email)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
if not auth_enabled_helper([u"RemoteUser"], user_profile.realm):
return None
return user_profile
class ZulipLDAPException(Exception):
pass
class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
# Don't use Django LDAP's permissions functions
def has_perm(self, user, perm, obj=None):
# type: (UserProfile, Any, Any) -> bool
# Using Any type is safe because we are not doing anything with
# the arguments.
return False
def has_module_perms(self, user, app_label):
# type: (UserProfile, str) -> bool
return False
def get_all_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def get_group_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def django_to_ldap_username(self, username):
# type: (Text) -> Text
if settings.LDAP_APPEND_DOMAIN:
if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN):
raise ZulipLDAPException("Username does not match LDAP domain.")
return email_to_username(username)
return username
def ldap_to_django_username(self, username):
# type: (str) -> str
if settings.LDAP_APPEND_DOMAIN:
return "@".join((username, settings.LDAP_APPEND_DOMAIN))
return username
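# Round-trip sketch, assuming settings.LDAP_APPEND_DOMAIN == 'example.com'
# (an illustrative value) and that email_to_username() strips the domain:
#   django_to_ldap_username('alice@example.com') -> 'alice'
#   ldap_to_django_username('alice') -> 'alice@example.com'
# With LDAP_APPEND_DOMAIN unset, both methods pass usernames through
# unchanged.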
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None, return_data=None):
# type: (Text, str, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
username = self.django_to_ldap_username(username)
user_profile = ZulipLDAPAuthBackendBase.authenticate(self, username, password)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
return user_profile
except Realm.DoesNotExist:
return None
except ZulipLDAPException:
return None
def get_or_create_user(self, username, ldap_user):
# type: (str, _LDAPUser) -> Tuple[UserProfile, bool]
try:
user_profile = get_user_profile_by_email(username)
if not user_profile.is_active or user_profile.realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
if not ldap_auth_enabled(user_profile.realm):
raise ZulipLDAPException("LDAP Authentication is not enabled")
return user_profile, False
except UserProfile.DoesNotExist:
realm = get_realm_by_email_domain(username)
# No need to check for an inactive user since they don't exist yet
if realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"]
short_name = full_name = ldap_user.attrs[full_name_attr][0]
if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"]
short_name = ldap_user.attrs[short_name_attr][0]
user_profile = do_create_user(username, None, realm, full_name, short_name)
return user_profile, True
# Just like ZulipLDAPAuthBackend, but doesn't let you log in.
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None):
# type: (Text, str, Optional[Text]) -> None
return None
class DevAuthBackend(ZulipAuthMixin):
# Allow logging in as any user without a password.
# This is used for convenience when developing Zulip.
def authenticate(self, username, realm_subdomain=None, return_data=None):
# type: (Text, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not dev_auth_enabled(user_profile.realm):
return None
return user_profile
class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2):
auth_backend_name = u"GitHub"
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Text]
try:
return kwargs['response']['email']
except KeyError:
return None
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
try:
return kwargs['response']['name']
except KeyError:
return ''
def do_auth(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[HttpResponse]
kwargs['return_data'] = {}
request = self.strategy.request
kwargs['realm_subdomain'] = get_subdomain(request)
user_profile = None
team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID
org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME
if (team_id is None and org_name is None):
user_profile = GithubOAuth2.do_auth(self, *args, **kwargs)
elif (team_id):
backend = GithubTeamOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User is not member of GitHub team.")
user_profile = None
elif (org_name):
backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User is not member of GitHub organization.")
user_profile = None
return self.process_do_auth(user_profile, *args, **kwargs)
AUTH_BACKEND_NAME_MAP = {
u'Dev': DevAuthBackend,
u'Email': EmailAuthBackend,
u'GitHub': GitHubAuthBackend,
u'Google': GoogleMobileOauth2Backend,
u'LDAP': ZulipLDAPAuthBackend,
u'RemoteUser': ZulipRemoteUserBackend,
} # type: Dict[Text, Any]
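# Usage sketch (hypothetical credentials): Django tries each entry in
# settings.AUTHENTICATION_BACKENDS in order, so a call like
#   authenticate(username='alice@example.com', password='...', return_data={})
# reaches EmailAuthBackend.authenticate() above when that backend is
# configured, and falls through to the next backend whenever one returns
# None.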
|
|
# coding: utf-8
"""
SystemMessageApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SystemMessageApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_system_message(self, document, **kwargs):
"""
Create some systemMessages
Create one or more systemMessages.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_system_message(document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SystemMessage document: Create a document by sending the paths to be added in the request body. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:return: SystemMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document', 'select', 'populate', 'sort']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_system_message" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `create_system_message`")
resource_path = '/systemMessages'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'document' in params:
body_params = params['document']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SystemMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_by_ids(self, document, **kwargs):
"""
Delete all the objects matching the ids provided.
Delete a set of objects in one shot.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_by_ids(document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] document: Array of Ids to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_by_ids" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `delete_by_ids`")
resource_path = '/systemMessages/deleteByIds'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'document' in params:
body_params = params['document']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_system_message_by_id(self, id, **kwargs):
"""
Delete a systemMessage by its unique ID
Deletes an existing systemMessage by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_system_message_by_id(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:return: SystemMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'select', 'populate']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_system_message_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_system_message_by_id`")
resource_path = '/systemMessages/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SystemMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_system_message_by_query(self, **kwargs):
"""
Delete some systemMessages by query
Delete all systemMessages matching the specified query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_system_message_by_query(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[SystemMessage]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_system_message_by_query" % key
)
params[key] = val
del params['kwargs']
resource_path = '/systemMessages'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SystemMessage]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_system_message_by_id(self, id, **kwargs):
"""
Get a systemMessage by its unique ID
Retrieve a systemMessage by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_system_message_by_id(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:return: SystemMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'select', 'populate']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_system_message_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_system_message_by_id`")
resource_path = '/systemMessages/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SystemMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def query_system_message(self, **kwargs):
"""
Query some systemMessages
Query over systemMessages.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.query_system_message(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param bool count: Set to true to return count instead of documents. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#count)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[SystemMessage]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'count', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_system_message" % key
)
params[key] = val
del params['kwargs']
resource_path = '/systemMessages'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'count' in params:
query_params['count'] = params['count']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SystemMessage]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_system_message(self, id, document, **kwargs):
"""
Modify a systemMessage by its unique ID
Update an existing systemMessage by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_system_message(id, document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param SystemMessage document: Update a document by sending the paths to be updated in the request body. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str x_baucis_update_operator: **BYPASSES VALIDATION** May be used with PUT to update the document using $push, $pull, or $set. [doc](https://github.com/wprl/baucis/wiki/HTTP-Headers)
:return: SystemMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'document', 'select', 'populate', 'x_baucis_update_operator']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_system_message" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_system_message`")
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `update_system_message`")
resource_path = '/systemMessages/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
if 'x_baucis_update_operator' in params:
header_params['X-Baucis-Update-Operator'] = params['x_baucis_update_operator']
form_params = []
local_var_files = {}
body_params = None
if 'document' in params:
body_params = params['document']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SystemMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
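# Illustrative usage sketch (not generated code; host configuration, payload and
# field names are placeholder assumptions).  Every method of SystemMessageApi
# follows the same convention documented above: call it without `callback` for a
# synchronous request, or pass a `callback` to receive the request thread back.
#
#     api = SystemMessageApi()
#     created = api.create_system_message({'message': 'Scheduled maintenance'})
#     recent = api.query_system_message(sort='-_id', limit=10)
#     thread = api.delete_system_message_by_id('someId',
#                                              callback=lambda resp: None)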
|
|
#!/usr/bin/env python3
r"""
This module provides functions which are useful to plug-in call point programs.
"""
import sys
import os
import re
import collections
import gen_print as gp
import gen_valid as gv
import gen_misc as gm
import gen_cmd as gc
import func_args as fa
PLUG_VAR_PREFIX = os.environ.get("PLUG_VAR_PREFIX", "AUTOBOOT")
def get_plug_in_package_name(case=None):
r"""
Return the plug-in package name (e.g. "OS_Console", "DB_Logging").
Description of argument(s):
case Indicates whether the value returned should be converted to upper or
lower case. Valid values are "upper", "lower" or None.
"""
plug_in_package_name = os.path.basename(gp.pgm_dir_path[:-1])
if case == "upper":
return plug_in_package_name.upper()
elif case == "lower":
return plug_in_package_name.lower()
else:
return plug_in_package_name
def return_plug_vars(general=True,
custom=True,
plug_in_package_name=None):
r"""
Return an OrderedDict which is sorted by key and which contains all of the plug-in environment variables.
Example excerpt of resulting dictionary:
plug_var_dict:
[AUTOBOOT_BASE_TOOL_DIR_PATH]: /tmp/
[AUTOBOOT_BB_LEVEL]: <blank>
[AUTOBOOT_BOOT_FAIL]: 0
...
This function also does the following:
- Set a default value for environment variable AUTOBOOT_OPENBMC_NICKNAME/AUTOIPL_FSP1_NICKNAME if it is
not already set.
- Register PASSWORD variables to prevent their values from being printed.
Note: The programmer may set a default for any given environment variable by declaring a global variable
of the same name and setting its value. For example, let's say the calling program has this global
declaration:
PERF_EXERCISERS_TOTAL_TIMEOUT = '180'
If environment variable PERF_EXERCISERS_TOTAL_TIMEOUT is blank or not set, this function will set it to
'180'.
Furthermore, if such a default variable declaration is not a string, this function will preserve that
non-string type in setting global variables (with the exception of os.environ values which must be
string). Example:
NVDIMM_ENCRYPT = 0
Description of argument(s):
general Return general plug-in parms (e.g. those beginning with "AUTOBOOT" or
"AUTOGUI").
custom Return custom plug-in parms (i.e. those beginning with the upper case
name of the plug-in package, for example "OBMC_SAMPLE_PARM1").
plug_in_package_name The name of the plug-in package for which custom parms are to be
returned. The default is the current plug in package name.
"""
regex_list = []
if not (general or custom):
return collections.OrderedDict()
plug_in_package_name = gm.dft(plug_in_package_name, get_plug_in_package_name())
if general:
regex_list = [PLUG_VAR_PREFIX, "AUTOGUI"]
if custom:
regex_list.append(plug_in_package_name.upper())
regex = "^(" + "|".join(regex_list) + ")_"
# Set a default for nickname.
if os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "") == "":
os.environ['AUTOBOOT_OPENBMC_NICKNAME'] = \
os.environ.get("AUTOBOOT_OPENBMC_HOST", "")
if os.environ.get("AUTOIPL_FSP1_NICKNAME", "") == "":
os.environ['AUTOIPL_FSP1_NICKNAME'] = \
os.environ.get("AUTOIPL_FSP1_NAME", "").split(".")[0]
# For all variables specified in the parm_def file, we want them to default to "" rather than being unset.
# Process the parm_def file if it exists.
parm_def_file_path = os.path.dirname(gp.pgm_dir_path.rstrip("/")) + "/" + plug_in_package_name \
+ "/parm_def"
if os.path.exists(parm_def_file_path):
parm_defs = gm.my_parm_file(parm_def_file_path)
else:
parm_defs = collections.OrderedDict()
# Example parm_defs:
# parm_defs:
# parm_defs[rest_fail]: boolean
# parm_defs[command]: string
# parm_defs[esel_stop_file_path]: string
# Create a list of plug-in environment variables by pre-pending <all caps plug-in package name>_<all
# caps var name>
plug_in_parm_names = [plug_in_package_name.upper() + "_" + x for x in
map(str.upper, parm_defs.keys())]
# Example plug_in_parm_names:
# plug_in_parm_names:
# plug_in_parm_names[0]: STOP_REST_FAIL
# plug_in_parm_names[1]: STOP_COMMAND
# plug_in_parm_names[2]: STOP_ESEL_STOP_FILE_PATH
# os.environ only accepts string values. However, if the user defines default values of other types
# (e.g. int), we wish to preserve the type.
non_string_defaults = {}
# Initialize unset plug-in vars.
for var_name in plug_in_parm_names:
# If there is a global variable with the same name as the environment variable, use its value as a
# default.
default_value = gm.get_mod_global(var_name, "")
if type(default_value) is not str:
non_string_defaults[var_name] = type(default_value)
os.environ[var_name] = os.environ.get(var_name, str(default_value))
if os.environ[var_name] == "":
os.environ[var_name] = str(default_value)
plug_var_dict = \
collections.OrderedDict(sorted({k: v for (k, v) in
os.environ.items()
if re.match(regex, k)}.items()))
# Restore the types of any variables where the caller had defined default values.
for key, value in non_string_defaults.items():
cmd_buf = "plug_var_dict[key] = " + str(value).split("'")[1] + "(plug_var_dict[key]"
if value is int:
# Use int base argument of 0 to allow it to interpret hex strings.
cmd_buf += ", 0)"
else:
cmd_buf += ")"
exec(cmd_buf, globals(), locals())
# Register password values to prevent printing them out. Any plug var whose name ends in PASSWORD will
# be registered.
password_vals = {k: v for (k, v) in plug_var_dict.items()
if re.match(r".*_PASSWORD$", k)}.values()
# In Python 3, map() is lazy, so iterate explicitly to make sure the password
# registrations actually take effect.
for password_val in password_vals:
    gp.register_passwords(password_val)
return plug_var_dict
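# Illustrative sketch (hypothetical caller code): per the prolog above, a plug-in
# program can shape the defaults returned by return_plug_vars by declaring module
# globals before calling it, and non-string default types are preserved.
#
#     PERF_EXERCISERS_TOTAL_TIMEOUT = '180'   # string default
#     NVDIMM_ENCRYPT = 0                      # int default; int type is preserved
#     plug_var_dict = return_plug_vars()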
def sprint_plug_vars(headers=1, **kwargs):
r"""
Sprint the plug-in environment variables (i.e. those that begin with the global PLUG_VAR_PREFIX value or
those that begin with <plug-in package_name>_ in upper case letters).
Example excerpt of output:
AUTOBOOT_BASE_TOOL_DIR_PATH=/tmp/
AUTOBOOT_BB_LEVEL=
AUTOBOOT_BOOT_FAIL=0
AUTOBOOT_BOOT_FAIL_THRESHOLD=1000000
Description of argument(s):
headers Print a header and a footer.
kwargs These are passed directly to return_plug_vars. See return_plug_vars doc
string for details.
"""
plug_var_dict = return_plug_vars(**kwargs)
buffer = ""
if headers:
buffer += "\n" + gp.sprint_dashes()
for key, value in plug_var_dict.items():
buffer += gp.sprint_varx(key, value)
if headers:
buffer += gp.sprint_dashes() + "\n"
return buffer
def print_plug_in_header():
r"""
Print plug-in header.
When debug is set, print all plug_prefix variables (e.g. AUTOBOOT_OPENBMC_HOST, etc.) and all plug-in
environment variables (e.g. OBMC_SAMPLE_PARM1) with surrounding dashed lines. When debug is not set,
print only the plug-in environment variables (e.g. OBMC_SAMPLE_PARM1) with no surrounding dashed lines.
NOTE: plug-in environment variables means any variable defined in the <plug-in dir>/parm_def file plus
any environment variables whose names begin with the upper-case plug-in package name.
"""
dprint_plug_vars()
if not debug:
qprint_plug_vars(headers=0, general=False, custom=True)
def get_plug_vars(mod_name="__main__", **kwargs):
r"""
Get all plug-in variables and put them in corresponding global variables.
This would include all environment variables beginning with either the global PLUG_VAR_PREFIX value or
with the upper case version of the plug-in package name + underscore (e.g. OP_SAMPLE_VAR1 for plug-in
OP_Sample).
The global variables to be set will be both with and without the global PLUG_VAR_PREFIX value prefix.
For example, if the environment variable in question is AUTOBOOT_OPENBMC_HOST, this function will set
global variable AUTOBOOT_OPENBMC_HOST and global variable OPENBMC_HOST.
Description of argument(s):
mod_name The name of the module whose global plug-in variables should be retrieved.
kwargs These are passed directly to return_plug_vars. See return_plug_vars's
prolog for details.
"""
module = sys.modules[mod_name]
plug_var_dict = return_plug_vars(**kwargs)
# Get all PLUG_VAR_PREFIX environment variables and put them into globals.
for key, value in plug_var_dict.items():
setattr(module, key, value)
setattr(module, re.sub("^" + PLUG_VAR_PREFIX + "_", "", key), value)
def get_plug_default(var_name,
default=None):
r"""
Derive and return a default value for the given parm variable.
Dependencies:
Global variable PLUG_VAR_PREFIX must be set.
This function will assign a default by checking the following environment variables in the order shown.
The first one that has a value will be used.
- <upper case package_name>_<var_name>
- <PLUG_VAR_PREFIX>_OVERRIDE_<var_name>
- <PLUG_VAR_PREFIX>_<var_name>
If none of these are found, this function will return the value passed by the caller in the "default"
parm.
Example:
Let's say your plug-in is named "OS_Console" and you call this function as follows:
get_plug_default("quiet", 0)
The first of these environment variables that is found to be set will be used to provide the default
value.
- OS_CONSOLE_QUIET
- AUTOBOOT_OVERRIDE_QUIET
- AUTOBOOT_QUIET
If none of those has a value, 0 (as specified by the caller in this example) is returned.
Let's say the master driver program is named obmc_boot. obmc_boot program is responsible for calling
plug-ins. Let's further suppose that the user wishes to run the master program with --debug=0 but wishes
to have all plug-ins run with --debug=1. This could be accomplished with the following call:
export AUTOBOOT_OVERRIDE_DEBUG=1 ; obmc_boot --debug=0 --plug_in_dir_paths=<list of plug ins>
As another example, let's suppose that the user wishes to have just the OS_Console plug-in run with debug
and everything else to default to debug=0. This could be accomplished as follows:
export OS_CONSOLE_DEBUG=1 ; obmc_boot --debug=0 --plug_in_dir_paths=<list of plug ins>
And as one more example, let's say the user wishes to have obmc_boot and OS_Console run without debug but
have all other plug-ins run with debug:
export AUTOBOOT_OVERRIDE_DEBUG=1 ; export OS_CONSOLE_DEBUG=0 ; obmc_boot --debug=0
--plug_in_dir_paths=<list of plug ins>
Description of argument(s):
var_name The name of the variable for which a default value is to be calculated.
default The default value if one cannot be determined.
"""
var_name = var_name.upper()
plug_in_package_name = get_plug_in_package_name(case="upper")
package_var_name = plug_in_package_name + "_" + var_name
default_value = os.environ.get(package_var_name, None)
if default_value is not None:
# A package-name version of the variable was found so return its value.
return(default_value)
plug_var_name = PLUG_VAR_PREFIX + "_OVERRIDE_" + var_name
default_value = os.environ.get(plug_var_name, None)
if default_value is not None:
# A PLUG_VAR_PREFIX version of the variable was found so return its value.
return default_value
plug_var_name = PLUG_VAR_PREFIX + "_" + var_name
default_value = os.environ.get(plug_var_name, None)
if default_value is not None:
# A PLUG_VAR_PREFIX version of the variable was found so return its value.
return default_value
return default
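# Worked example of the precedence described above (hypothetical values): with
# OS_CONSOLE_QUIET unset, the OVERRIDE variable wins over the plain
# PLUG_VAR_PREFIX variable.
#
#     os.environ['AUTOBOOT_OVERRIDE_QUIET'] = '1'
#     os.environ['AUTOBOOT_QUIET'] = '0'
#     get_plug_default("quiet", 0)    # returns '1'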
def required_plug_in(required_plug_in_names,
plug_in_dir_paths=None):
r"""
Determine whether the required_plug_in_names are in plug_in_dir_paths, construct an error_message and
call gv.process_error_message(error_message).
In addition, for each plug-in in required_plug_in_names, set the global plug-in variables. This is
useful for callers who then want to validate certain values from other plug-ins.
Example call:
required_plug_in(required_plug_in_names)
Description of argument(s):
required_plug_in_names A list of plug_in names that the caller requires (e.g. ['OS_Console']).
plug_in_dir_paths A string which is a colon-delimited list of plug-ins specified by the
user (e.g. DB_Logging:FFDC:OS_Console:Perf). Path values (e.g.
"/home/robot/dir1") will be stripped from this list to do the analysis.
Default value is the AUTOGUI_PLUG_IN_DIR_PATHS or
<PLUG_VAR_PREFIX>_PLUG_IN_DIR_PATHS environment variable.
"""
# Calculate default value for plug_in_dir_paths.
plug_in_dir_paths = gm.dft(plug_in_dir_paths,
os.environ.get('AUTOGUI_PLUG_IN_DIR_PATHS',
os.environ.get(PLUG_VAR_PREFIX + "_PLUG_IN_DIR_PATHS", "")))
# Convert plug_in_dir_paths to a list of base names.
plug_in_dir_paths = \
list(filter(None, map(os.path.basename, plug_in_dir_paths.split(":"))))
error_message = gv.valid_list(plug_in_dir_paths, required_values=required_plug_in_names)
if error_message:
return gv.process_error_message(error_message)
for plug_in_package_name in required_plug_in_names:
get_plug_vars(general=False, plug_in_package_name=plug_in_package_name)
def compose_plug_in_save_dir_path(plug_in_package_name=None):
r"""
Create and return a directory path name that is suitable for saving plug-in data.
The name will be comprised of things such as plug_in package name, pid, etc. in order to guarantee that
it is unique for a given test run.
Description of argument(s):
plug_in_package_name The plug-in package name. This defaults to the name of the caller's
plug-in package. However, the caller can specify another value in order
to retrieve data saved by another plug-in package.
"""
plug_in_package_name = gm.dft(plug_in_package_name,
get_plug_in_package_name())
BASE_TOOL_DIR_PATH = \
gm.add_trailing_slash(os.environ.get(PLUG_VAR_PREFIX
+ "_BASE_TOOL_DIR_PATH",
"/tmp/"))
NICKNAME = os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "")
if NICKNAME == "":
NICKNAME = os.environ["AUTOIPL_FSP1_NICKNAME"]
MASTER_PID = os.environ[PLUG_VAR_PREFIX + "_MASTER_PID"]
gp.dprint_vars(BASE_TOOL_DIR_PATH, NICKNAME, plug_in_package_name, MASTER_PID)
return BASE_TOOL_DIR_PATH + gm.username() + "/" + NICKNAME + "/" +\
plug_in_package_name + "/" + str(MASTER_PID) + "/"
def create_plug_in_save_dir(plug_in_package_name=None):
r"""
Create a directory suitable for saving plug-in processing data and return its path name.
See compose_plug_in_save_dir_path for details.
Description of argument(s):
plug_in_package_name See compose_plug_in_save_dir_path for details.
"""
plug_in_save_dir_path = compose_plug_in_save_dir_path(plug_in_package_name)
if os.path.isdir(plug_in_save_dir_path):
return plug_in_save_dir_path
gc.shell_cmd("mkdir -p " + plug_in_save_dir_path)
return plug_in_save_dir_path
def delete_plug_in_save_dir(plug_in_package_name=None):
r"""
Delete the plug_in save directory. See compose_plug_in_save_dir_path for details.
Description of argument(s):
plug_in_package_name See compose_plug_in_save_dir_path for details.
"""
gc.shell_cmd("rm -rf "
+ compose_plug_in_save_dir_path(plug_in_package_name))
def save_plug_in_value(var_value=None, plug_in_package_name=None, **kwargs):
r"""
Save a value in a plug-in save file. The value may be retrieved later via a call to the
restore_plug_in_value function.
This function will figure out the variable name corresponding to the value passed and use that name in
creating the plug-in save file.
The caller may pass the value as a simple variable or as a keyword=value (see examples below).
Example 1:
my_var1 = 5
save_plug_in_value(my_var1)
In this example, the value "5" would be saved to the "my_var1" file in the plug-in save directory.
Example 2:
save_plug_in_value(my_var1=5)
In this example, the value "5" would be saved to the "my_var1" file in the plug-in save directory.
Description of argument(s):
var_value The value to be saved.
plug_in_package_name See compose_plug_in_save_dir_path for details.
kwargs The first entry may contain a var_name/var_value. Other entries are
ignored.
"""
if var_value is None:
var_name = next(iter(kwargs))
var_value = kwargs[var_name]
else:
# Get the name of the variable used as argument one to this function.
var_name = gp.get_arg_name(0, 1, stack_frame_ix=2)
plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
save_file_path = plug_in_save_dir_path + var_name
gp.qprint_timen("Saving \"" + var_name + "\" value.")
gp.qprint_varx(var_name, var_value)
gc.shell_cmd("echo '" + str(var_value) + "' > " + save_file_path)
def restore_plug_in_value(*args, **kwargs):
r"""
Return a value from a plug-in save file.
The args/kwargs are interpreted differently depending on how this function is called.
Mode 1 - The output of this function is assigned to a variable:
Example:
my_var1 = restore_plug_in_value(2)
In this mode, the lvalue ("my_var1" in this example) will serve as the name of the value to be restored.
Mode 2 - The output of this function is NOT assigned to a variable:
Example:
if restore_plug_in_value('my_var1', 2):
do_something()
In this mode, the caller must explicitly provide the name of the value being restored.
The args/kwargs are interpreted as follows:
Description of argument(s):
var_name The name of the value to be restored. Only relevant in mode 1 (see
example above).
default The default value to be returned if there is no plug-in save file for the
value in question.
plug_in_package_name See compose_plug_in_save_dir_path for details.
"""
# Process args.
lvalue = gp.get_arg_name(0, -1, stack_frame_ix=2)
if lvalue:
var_name = lvalue
else:
var_name, args, kwargs = fa.pop_arg("", *args, **kwargs)
default, args, kwargs = fa.pop_arg("", *args, **kwargs)
plug_in_package_name, args, kwargs = fa.pop_arg(None, *args, **kwargs)
if args or kwargs:
error_message = "Programmer error - Too many arguments passed for this function."
raise ValueError(error_message)
plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
save_file_path = plug_in_save_dir_path + var_name
if os.path.isfile(save_file_path):
gp.qprint_timen("Restoring " + var_name + " value from " + save_file_path + ".")
var_value = gm.file_to_list(save_file_path, newlines=0, comments=0, trim=1)[0]
if type(default) is bool:
# Convert from string to bool.
var_value = (var_value == 'True')
if type(default) is int:
# Convert from string to int.
var_value = int(var_value)
else:
var_value = default
gp.qprint_timen("Save file " + save_file_path + " does not exist so returning default value.")
gp.qprint_varx(var_name, var_value)
return var_value
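# Illustrative sketch (hypothetical call point programs): save_plug_in_value and
# restore_plug_in_value are designed to be used as a pair across call points of
# the same plug-in package.
#
#     # In cp_setup:
#     boot_count = 3
#     save_plug_in_value(boot_count)
#
#     # In cp_cleanup (mode 1 -- the lvalue supplies the name to restore):
#     boot_count = restore_plug_in_value(0)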
def exit_not_master():
r"""
Exit the program with return code zero if this program was NOT called by the master program.
There are cases where plug-ins are called by a multi-layered stack:
master_wrapper
obmc_boot_test.py
Example_plug_in/cp_setup
In a scenario like this, Example_plug_in/cp_setup may be called once directly by master_wrapper (the
master) and then called again directly by obmc_boot_test.py (the child). Some plug-in programs may
wish to avoid doing any processing on the second such call. This function will achieve that purpose.
This function will print a standard message to stdout prior to exiting.
"""
AUTOBOOT_MASTER_PID = gm.get_mod_global("AUTOBOOT_MASTER_PID")
AUTOBOOT_PROGRAM_PID = gm.get_mod_global("AUTOBOOT_PROGRAM_PID")
if AUTOBOOT_MASTER_PID != AUTOBOOT_PROGRAM_PID:
message = get_plug_in_package_name() + "/" + gp.pgm_name + " is not" \
+ " being called by the master program in the stack so no action" \
+ " will be taken."
gp.qprint_timen(message)
gp.qprint_vars(AUTOBOOT_MASTER_PID, AUTOBOOT_PROGRAM_PID)
exit(0)
def add_tarball_tools_dir_to_path(quiet=0):
r"""
Find the directory containing the tarball tools and pre-pend it to PATH.
The calling program is responsible for making sure that the tarball has been unpacked.
"""
AUTOBOOT_BASE_TOOL_DIR_PATH = gm.get_mod_global("AUTOBOOT_BASE_TOOL_DIR_PATH")
AUTOBOOT_OPENBMC_NICKNAME = gm.get_mod_global("AUTOBOOT_OPENBMC_NICKNAME")
tool_dir_path = AUTOBOOT_BASE_TOOL_DIR_PATH + os.environ.get('USER') + os.sep \
+ AUTOBOOT_OPENBMC_NICKNAME + os.sep
tarball_tools_dir_path = tool_dir_path + 'tarball/x86/bin'
os.environ['PATH'] = gm.add_path(tarball_tools_dir_path, os.environ.get('PATH', ''))
def stop_test_rc():
r"""
Return the constant stop test return code value.
When a plug-in call point program returns this value, it indicates that master program should stop
running.
"""
return 0x00000002
def dump_ffdc_rc():
r"""
Return the constant dump FFDC return code value.
When a plug-in call point program returns this value, it indicates that FFDC data should be collected.
"""
return 0x00000002
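# Illustrative sketch (hypothetical): a plug-in call point program communicates
# these requests to the master program through its exit code.
#
#     if need_ffdc:
#         exit(dump_ffdc_rc())    # ask the master program to collect FFDC data
#     if fatal_error:
#         exit(stop_test_rc())    # ask the master program to stop running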
# Create print wrapper functions for all sprint functions defined above.
# func_names contains a list of all print functions which should be created from their sprint counterparts.
func_names = ['print_plug_vars']
# stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
stderr_func_names = []
replace_dict = dict(gp.replace_dict)
replace_dict['mod_qualifier'] = 'gp.'
func_defs = gp.create_print_wrapper_funcs(func_names, stderr_func_names,
replace_dict)
gp.gp_debug_print(func_defs)
exec(func_defs)
|
|
from unittest.mock import patch, call
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from accounts.factories import UserFactory
from matches.factories import PastMatchFactory, FutureMatchFactory, BetFactory
from matches.models import Match, Bet
class MatchModelTest(TestCase):
def test_cant_be_without_fields(self):
match = Match()
with self.assertRaises(ValidationError):
match.full_clean()
def test_has_is_in_future_attr(self):
past_match = PastMatchFactory.create()
future_match = FutureMatchFactory.create()
self.assertTrue(future_match.in_future)
self.assertFalse(past_match.in_future)
def test_can_be_without_scores(self):
match = FutureMatchFactory.create()
match.full_clean() # should not raise
def test_default_score_values(self):
match = Match()
self.assertEqual(match.home_score, None)
self.assertEqual(match.away_score, None)
@patch('matches.models.Match.save')
def test_set_score_calls_save_model_for_past_match(self, mock_save):
match = PastMatchFactory.create()
mock_save.reset_mock()
match.set_score(home_score=1, away_score=2)
self.assertTrue(mock_save.called)
@patch('matches.models.Match.save')
def test_set_score_doesnt_call_save_model_for_future_match(self, mock_save):
match = FutureMatchFactory.create()
mock_save.reset_mock()
match.set_score(home_score=1, away_score=2)
self.assertFalse(mock_save.called)
def test_has_result_true_for_past_match_with_result(self):
match = PastMatchFactory.create()
self.assertTrue(match.has_result)
def test_has_result_false_for_past_match_without_result(self):
match = PastMatchFactory.create(home_score=None, away_score=None)
self.assertFalse(match.has_result)
def test_has_result_false_for_future_match(self):
match = FutureMatchFactory.create()
self.assertFalse(match.has_result)
def test_has_string_representation(self):
match = PastMatchFactory(
home_team='A', away_team='B',
home_score=0, away_score=1
)
self.assertEqual(str(match), 'A 0 - 1 B')
def test_match_without_result_has_empty_string_representation(self):
match = PastMatchFactory(
home_team='A', away_team='B',
home_score=None, away_score=None
)
self.assertEqual(str(match), 'A - B')
def test_match_has_result_property(self):
match = PastMatchFactory(home_score=0, away_score=1)
self.assertEqual(match.result, '0 - 1')
def test_match_has_result_property_for_no_result(self):
match = PastMatchFactory(home_score=None, away_score=None)
self.assertEqual(match.result, '')
@patch('matches.models.Bet.set_result')
def test_match_update_bets_calls_set_result_for_every_bet(self, mock_set_result):
user1 = UserFactory.create()
user2 = UserFactory.create()
match = PastMatchFactory(home_score=1, away_score=2)
bet1 = BetFactory(match=match, user=user1)
BetFactory(match=match, user=user2)
match.update_bets()
self.assertEqual(mock_set_result.call_count, 2)
@patch('matches.models.Match.update_bets')
def test_match_saves_with_scores_calls_update_bets(self, mock_update_bets):
match = PastMatchFactory(home_score=None, away_score=None)
match.home_score = 1
match.away_score = 1
match.save()
self.assertTrue(mock_update_bets.called)
class BetModelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.past_match = PastMatchFactory.create()
cls.future_match = FutureMatchFactory.create()
cls.user = UserFactory.create()
def test_should_contain_match(self):
with self.assertRaises(IntegrityError):
BetFactory(user=self.user)
def test_should_contain_user(self):
with self.assertRaises(IntegrityError):
BetFactory(match=self.past_match)
def test_invalid_incorrect_score(self):
bet = Bet(home_score='two', away_score=1, match=self.future_match, user=self.user)
with self.assertRaises(ValidationError):
bet.full_clean()
def test_valid_correct_score(self):
bet = Bet(home_score=2, away_score=1, match=self.future_match, user=self.user)
bet.full_clean()
# should not raise
def test_valid_for_future_matches(self):
bet = BetFactory.create(match=self.future_match, user=self.user)
bet.full_clean()
# should not raise
def test_invalid_for_past_matches(self):
bet = BetFactory.create(match=self.past_match, user=self.user)
with self.assertRaises(ValidationError):
bet.full_clean()
def test_has_string_representation(self):
bet = BetFactory(home_score=0, away_score=1, match=self.past_match, user=self.user)
self.assertEqual(str(bet), '0 - 1')
def test_empty_bet_has_string_representation(self):
bet = Bet(match=self.past_match, user=self.user)
self.assertEqual(str(bet), '')
def test_result_field_can_be_blank(self):
bet = BetFactory.create(home_score=2, away_score=1, match=self.past_match, user=self.user)
self.assertEqual(bet.result, None)
@patch('matches.bet_result.calc_bet_result')
def test_bet_save_doesnt_call_calc_result_func(self, mock_calc_bet_result):
match = FutureMatchFactory.create()
BetFactory.build(home_score=2, away_score=1, match=match, user=self.user)
self.assertFalse(mock_calc_bet_result.called)
@patch('matches.bet_result.calc_bet_result')
def test_past_match_set_score_calls_calc_result_func_for_all_match_bets(self, mock_calc_bet_result):
match = PastMatchFactory.create(home_score=None, away_score=None)
user2 = UserFactory.create()
BetFactory.create(home_score=4, away_score=3, match=match, user=self.user)
BetFactory.create(home_score=2, away_score=1, match=match, user=user2)
mock_calc_bet_result.return_value = 12
self.assertFalse(mock_calc_bet_result.called)
match.set_score(home_score=2, away_score=1)
self.assertEqual(mock_calc_bet_result.call_count, 2)
mock_calc_bet_result.assert_has_calls([
call(
home_bet=4, away_bet=3, home_score=2, away_score=1,
shootout_winner=None, shootout_bet=None,
),
call(
home_bet=2, away_bet=1, home_score=2, away_score=1,
shootout_winner=None, shootout_bet=None,
)
])
def test_past_match_set_score_set_all_match_bets_results(self):
match = PastMatchFactory.create(home_score=None, away_score=None)
user2 = UserFactory.create()
BetFactory.create(home_score=4, away_score=3, match=match, user=self.user)
BetFactory.create(home_score=2, away_score=1, match=match, user=user2)
match.set_score(home_score=2, away_score=1)
self.assertEqual(Bet.objects.all()[0].result, 6)
self.assertEqual(Bet.objects.all()[1].result, 12)
def check_format(self, result):
# the given result string should parse as a 5 - 4 bet
bet = Bet(match=self.future_match)
bet.set_bet(result)
self.assertEqual(bet.home_score, 5)
self.assertEqual(bet.away_score, 4)
def test_check_formats(self):
self.check_format('5-4')
self.check_format('5 - 4')
self.check_format('5 -4')
self.check_format(' 5-4 ')
self.check_format('5: 4 ')
def test_check_format_fail(self):
with self.assertRaises(ValidationError):
self.check_format('5=4')
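# Illustrative sketch (hypothetical -- the real Bet.set_bet lives in
# matches.models and is not shown here): one parser that would satisfy the
# formats exercised by test_check_formats and test_check_format_fail above.
def _example_parse_bet(result):
    import re
    parsed = re.match(r'^\s*(\d+)\s*[-:]\s*(\d+)\s*$', result)
    if parsed is None:
        raise ValidationError('Invalid bet format: %s' % result)
    return int(parsed.group(1)), int(parsed.group(2))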
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 09:49:43 2017
NB 2013 had no surveys taken
NB 2014 AnswerIDs have not been matched to ProfileIDs
@author: SaintlyVi
"""
import pandas as pd
import os
from support import cdata_dir, emdata_dir, writeLog
import features.feature_socios as socios
import features.feature_ts as ts
def readClasses(year, dir_name):
"""
This function gets the inferred class for each AnswerID from 'DLR_DB/classmod/out/experiment_dir'.
"""
try:
dir_path = os.path.join(cdata_dir, dir_name)
file_name = [s for s in os.listdir(dir_path) if str(year) in s][0]
classes = pd.read_csv(os.path.join(dir_path, file_name), header=0, index_col=0)
return classes
except IndexError:
print('No classes inferred for '+ str(year))
raise
def selectClasses(year, dir_name, threshold='max'):
"""
This function sets the inferred class for each AnswerID.
"""
df = readClasses(year, dir_name)
if threshold == 'max':
inferredclass = df.idxmax(axis=1) #USER MUST BE ABLE TO CHANGE THIS
inferredclass = inferredclass.reset_index()
inferredclass.rename(columns={0:'class'}, inplace=True)
return inferredclass
def yearsElectrified(year):
"""
This function gets the number of years since electrification for each AnswerID.
"""
try:
if 1994 <= year <= 1999:
data = socios.buildFeatureFrame(['years'], year)[0]
elif 2000 <= year:
data = socios.buildFeatureFrame(['electricity'], year)[0]
data.columns = ['AnswerID','YearsElectrified']
cats = [0] + list(range(2, 16)) + [100]
data.YearsElectrified = pd.cut(data.YearsElectrified, cats, right=False, labels = list(range(1, 16)), include_lowest=False)
data.YearsElectrified = data.YearsElectrified.astype('int', copy=False)
except Exception:
    print('Could not retrieve valid data for the given year.')
    return None
return data
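# Worked example of the binning above: with bin edges [0, 2, 3, ..., 15, 100],
# right=False and labels 1..15, a household electrified for 0 or 1 years maps to
# label 1, 2 years to 2, ..., 14 years to 14, and anything from 15 through 99
# years is capped at label 15.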
def observedMaxDemand(profilepowerdata, year, classes_dir):
"""
This function selects the maximum demand in kVA for each AnswerID in a year and returns it with its time of occurence.
"""
if year <= 2009:
power_col = 'kw_calculated'
else:
power_col = 'Unitsread_kva'
maxdemand = profilepowerdata.iloc[profilepowerdata.reset_index().groupby(['AnswerID'])[power_col].idxmax()].reset_index(drop=True)
maxdemand['month'] = maxdemand['Datefield'].dt.month
maxdemand['daytype'] = maxdemand['Datefield'].dt.dayofweek
maxdemand['hour'] = maxdemand['Datefield'].dt.hour
md = maxdemand[['AnswerID','RecorderID',power_col ,'month','daytype','hour']]
classes = selectClasses(year, classes_dir)
yearselect = yearsElectrified(year)
meta = pd.merge(classes, yearselect, on='AnswerID')
profiles = pd.merge(md, meta, on='AnswerID')
return profiles
def observedDemandSummary(annual_monthly_demand_data, year, classes_dir):
"""
This function generates a demand summary model based on a year of data.
The model contains aggregate hourly kW readings for the factors:
Customer Class
Years Electrified
"""
interval = annual_monthly_demand_data.interval[0]
classes = selectClasses(year, classes_dir)
yearselect = yearsElectrified(year)
meta = pd.merge(classes, yearselect, on='AnswerID')
richprofiles = pd.merge(annual_monthly_demand_data, meta, on='AnswerID')
profiles = richprofiles.groupby(['class','YearsElectrified']).agg({
interval+'_kw_mean':['mean','std'],
interval+'_kw_std':['mean','std'],
# interval+'_kva_mean':['mean','std'],
# interval+'_kva_std':['mean','std'],
'valid_hours':'sum',
'interval_hours_sum':'sum',
'AnswerID':'count'})
profiles.columns = ['_'.join(col).strip() for col in profiles.columns.values]
profiles.rename(columns={interval+'_kw_mean_mean':interval+'_kw_mean',
interval+'_kw_mean_std':interval+'_kw_mean_diversity',
interval+'_kw_std_mean':interval+'_kw_std',
interval+'_kw_std_std':interval+'_kw_std_diversity',
# interval+'_kva_mean_mean':interval+'_kva_mean',
# interval+'_kva_mean_std':interval+'_kva_mean_diversity',
# interval+'_kva_std_mean':interval+'_kva_std',
# interval+'_kva_std_std':interval+'_kva_std_diversity',
'valid_hours_sum':'valid_hours',
'interval_hours_sum_sum': 'interval_hours'}, inplace=True)
profiles['valid_obs_ratio'] = profiles['valid_hours'] / profiles['interval_hours']
profiles.drop(columns=['valid_hours', 'interval_hours'], inplace=True)
return profiles.reset_index()
def observedHourlyProfiles(aggdaytype_demand_data, year, classes_dir):
"""
This function generates an hourly load profile model based on a year of data.
The model contains aggregate hourly kVA readings for the factors:
Customer Class
Month
Daytype [Weekday, Sunday, Monday]
Hour
Years Electrified
"""
classes = selectClasses(year, classes_dir)
yearselect = yearsElectrified(year)
meta = pd.merge(classes, yearselect, on='AnswerID')
richprofiles = pd.merge(aggdaytype_demand_data, meta, on='AnswerID')
profiles = richprofiles.groupby(['class','YearsElectrified','month','daytype','hour']).agg({
'kw_mean':['mean','std'],
'kw_std':['mean','std'],
# 'kva_mean':['mean','std'],
# 'kva_std':['mean','std'],
'valid_hours':'sum',
'AnswerID':'count',
'total_hours_sum':'sum'})
profiles.columns = ['_'.join(col).strip() for col in profiles.columns.values]
profiles.rename(columns={
'kw_mean_mean':'kw_mean',
'kw_mean_std':'kw_mean_diversity',
'kw_std_mean':'kw_std',
'kw_std_std':'kw_std_diversity',
# 'kva_mean_mean':'kva_mean',
# 'kva_mean_std':'kva_mean_diversity',
# 'kva_std_mean':'kva_std',
# 'kva_std_std':'kva_std_diversity',
'valid_hours_sum':'valid_hours',
'total_hours_sum_sum': 'total_hours'}, inplace=True)
profiles['valid_obs_ratio'] = profiles['valid_hours'] / profiles['total_hours']
return profiles.reset_index()
def generateRun(year, experiment, algorithm, run):
"""
This function generates the experimental model from observations.
"""
classes_dir = experiment+'_'+algorithm+'_'+str(run)
pp = ts.readAggProfiles(year, 'pp')
aggpp = ts.readAggProfiles(year, 'aggpp_M')
amd = ts.readAggProfiles(year, 'aMd')
adtd = ts.readAggProfiles(year, 'adtd')
#TODO have to match AnswerID to ProfileID --- think of doing this in socios.loadID and match on the classes side rather than profiles side
try:
md = observedMaxDemand(pp, year, classes_dir)
ods = observedDemandSummary(amd, year, classes_dir)
ohp = observedHourlyProfiles(adtd, year, classes_dir)
except Exception as e:
print(e)
raise
return ods, ohp, md, adtd, amd, aggpp, pp
def saveExpModel(year, experiment, algorithm, run):
"""
This function generates the experimental model from observations.
"""
ods, ohp, md, adtd, amd, aggpp, pp = generateRun(year, experiment, algorithm, run)
run_dir = experiment+'_'+algorithm+'_'+str(run)
dir_path = os.path.join(emdata_dir, run_dir)
os.makedirs(dir_path , exist_ok=True)
loglines = []
for k, v in {'demand_summary':ods, 'hourly_profiles':ohp}.items():
try:
file_path = os.path.join(dir_path, k + '_'+ str(year) + '.csv')
v.to_csv(file_path, index=False)
status = 1
message = 'Success!'
print('Successfully saved to ' + file_path)
except Exception as e:
status = 0
message = str(e)
print('Could not save ' + k)
l = [k, year, experiment, algorithm, run, status, message]
loglines.append(l)
logs = pd.DataFrame(loglines, columns = ['submodel_type', 'year', 'experiment', 'algorithm',
'run', 'status','message'])
writeLog(logs,'log_modelRun')
return
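# Illustrative usage sketch (hypothetical experiment parameters):
#
#     saveExpModel(2012, 'exp1', 'kmeans', 0)
#
# This reads the aggregated profiles for 2012, builds the observed demand summary
# and hourly profile models using classes from the 'exp1_kmeans_0' inference run,
# writes them under emdata_dir and appends the outcome to the 'log_modelRun' log.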
|
|
import os
import sys
import unittest
from nose.tools import assert_true, assert_false, assert_is_not_none, \
assert_equals, assert_is_none
from robotide.robotapi import (
TestCaseFile, Resource, VariableTable, TestDataDirectory)
from robotide.context import IS_WINDOWS
from robotide.namespace import Namespace
from robotide.namespace.namespace import _VariableStash
from robotide.controller.filecontrollers import DataController
from robotide.spec.iteminfo import ArgumentInfo, VariableInfo
from robotide.spec.librarymanager import LibraryManager
from robotide.utils import normpath
from datafilereader import *
from resources.mocks import FakeSettings
RESOURCES_DIR = 'resources'
sys.path.append(os.path.join(os.path.dirname(__file__), '..', RESOURCES_DIR,
'robotdata', 'libs'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', RESOURCES_DIR,
'robotdata', 'put_into_python_path'))
OS_LIB = 'OperatingSystem'
COLLECTIONS_LIB = 'Collections'
STRING_LIB = 'String'
TELNET_LIB = 'Telnet'
TELNET_LIB_ALIAS = 'telikka'
RES_NAME_VARIABLE = '${resname}'
LIB_NAME_VARIABLE = '${libname}'
UNRESOLVABLE_VARIABLE = '${unresolvable}'
UNKNOWN_VARIABLE = '${this var does not exist}'
EXTENSION_VAR = '${extension}'
EXTENSION = 'txt'
INVALID_FILE_PATH = '/this/is/invalid.py'
EXISTING_USER_KEYWORD = 'Should be in keywords Uk'
COLLIDING_ARGUMENT = '${colliding argument}'
COLLIDING_CONSTANT = COLLIDING_ARGUMENT.upper()
def _build_test_case_file():
tcf = TestCaseFile()
tcf.source = 'tmp.txt'
tcf.directory = '/tmp/'
_add_settings_table(tcf)
_add_variable_table(tcf)
_add_keyword_table(tcf)
return tcf
def _add_settings_table(tcf):
tcf.setting_table.add_library(OS_LIB)
tcf.setting_table.add_resource(RESOURCE_PATH)
tcf.setting_table.add_resource(RESOURCE_LIB_PATH)
tcf.setting_table.add_resource(RES_NAME_VARIABLE)
tcf.setting_table.add_library(LIB_NAME_VARIABLE)
tcf.setting_table.add_library(UNRESOLVABLE_VARIABLE)
tcf.setting_table.add_library(LIBRARY_WITH_SPACES_IN_PATH)
tcf.setting_table.add_library(TELNET_LIB, ['WITH NAME', TELNET_LIB_ALIAS])
tcf.setting_table.add_resource(RESOURCE_WITH_VARIABLE_IN_PATH)
tcf.setting_table.add_variables(INVALID_FILE_PATH)
def _add_variable_table(tcf):
tcf.variable_table.add(LIB_NAME_VARIABLE, COLLECTIONS_LIB)
tcf.variable_table.add(RES_NAME_VARIABLE, RESOURCE_WITH_VARS)
tcf.variable_table.add(EXTENSION_VAR, EXTENSION)
tcf.variable_table.add(UNRESOLVABLE_VARIABLE, UNKNOWN_VARIABLE)
tcf.variable_table.add(COLLIDING_CONSTANT, 'collision')
tcf.variable_table.add('&{dict var}', {'key': 'value'})
tcf.variable_table.add(u'${I <3 Unicode and \xe4iti}', u'123 \xe7')
def _add_keyword_table(tcf):
uk_table = tcf.keyword_table
uk_table.add(EXISTING_USER_KEYWORD)
uk_table.keywords[0].args.value = [
'${keyword argument}', '${colliding argument}',
'${keyword argument with default} = default']
class ParentMock(object):
source = '/tmp/example/parentmock'
directory = '/tmp/example'
report_invalid_syntax = lambda *args: None
class _DataFileTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tcf = _build_test_case_file()
cls.tcf_ctrl = DataController(cls.tcf, None)
cls.kw = cls.tcf_ctrl.keywords[0]
cls.ns = Namespace(FakeSettings())
cls.library_manager = LibraryManager(':memory:')
cls.library_manager.start()
cls.library_manager.create_database()
cls.ns.set_library_manager(cls.library_manager)
@classmethod
def tearDownClass(cls):
cls.library_manager.stop()
cls.library_manager = None
class TestKeywordSuggestions(_DataFileTest):
def test_getting_suggestions_for_empty_datafile(self):
start = 'shOulD'
sugs = self.ns.get_suggestions_for(self.kw, start)
assert_true(len(sugs) > 0)
for s in sugs:
assert_true(s.name.lower().startswith(start.lower()))
def test_getting_suggestions_in_order(self):
sugs = self.ns.get_suggestions_for(self.kw, 'sHoUlD')
assert_true(len(sugs) > 2)
assert_equals(sugs, sorted(sugs))
def test_user_keywords(self):
sugs = self.ns.get_suggestions_for(self.kw, 'sHoUlD')
assert_true(EXISTING_USER_KEYWORD in [s.name for s in sugs])
def test_imported_lib_keywords(self):
sugs = self.ns.get_suggestions_for(self.kw, 'create file')
self._assert_import_kws(sugs, OS_LIB)
def test_lib_from_resource_file(self):
sugs = self.ns.get_suggestions_for(self.kw, 'generate random')
self._assert_import_kws(sugs, STRING_LIB)
def test_lib_import_from_var(self):
sugs = self.ns.get_suggestions_for(self.kw, 'Copy List')
self._assert_import_kws(sugs, COLLECTIONS_LIB)
def test_lib_import_with_spaces(self):
sugs = self.ns.get_suggestions_for(self.kw, 'space')
# remove variable suggestions
sugs = [s for s in sugs if not isinstance(s, VariableInfo)]
self._assert_import_kws(sugs, 'spacelib')
def test_resource_file_keywords(self):
sugs = self.ns.get_suggestions_for(self.kw, 'Resource Uk')
self._assert_import_kws(sugs, RESOURCES_HTML)
def test_resource_file_keyword_with_longname(self):
sugs = self.ns.get_suggestions_for(
self.kw, RESOURCES_HTML.replace('.html', '') + '.Resource Uk')
self._assert_import_kws(sugs, RESOURCES_HTML)
def test_keywords_normalization(self):
sugs = self.ns.get_suggestions_for(self.kw, 'Reso Urceuk')
self._assert_import_kws(sugs, RESOURCES_HTML)
def test_uk_from_resource_files_resource_file(self):
sugs = self.ns.get_suggestions_for(self.kw, 'UK From Text Resource')
self._assert_import_kws(sugs, 'resource.txt')
def test_resource_file_from_variable(self):
sugs = self.ns.get_suggestions_for(
self.kw, 'UK From Variable Resource')
self._assert_import_kws(sugs, 'resource_with_variables.txt')
def test_resource_file_from_resource_file_with_variable(self):
sugs = self.ns.get_suggestions_for(
self.kw, 'UK From Resource from Resource with Variable')
self._assert_import_kws(
sugs, 'resource_from_resource_with_variable.txt')
def test_library_from_resourcefile_variable(self):
sugs = self.ns.get_suggestions_for(self.kw, 'Execute Manual')
self._assert_import_kws(sugs, 'Dialogs')
def test_xml_library(self):
sugs = self.ns.get_suggestions_for(self._get_controller(
TESTCASEFILE_WITH_EVERYTHING).keywords[0], 'Attributeless Keyword')
self._assert_import_kws(sugs, 'LibSpecLibrary')
def test_xml_library_is_library_keyword(self):
everything_tcf = TestCaseFile(
source=TESTCASEFILE_WITH_EVERYTHING).populate()
assert_true(self.ns.is_library_keyword(
everything_tcf, 'Attributeless Keyword'))
def test_variable_path_separator(self):
sugs = self.ns.get_suggestions_for(
self._get_controller(TESTCASEFILE_WITH_EVERYTHING).keywords[0], 'foo')
self._assert_import_kws(sugs, 'even_more_resources.txt')
def test_keywords_only_once_per_source(self):
sugs = self.ns.get_suggestions_for(self.kw, '')
kw_set = []
for kw in sugs:
if self._not_variable(kw):
key = 'kw: %s %s' % (kw.name, kw.source)
assert_false(key in kw_set, key)
kw_set.append(key)
def _not_variable(self, item):
return not (item.name.startswith('$') or item.name.startswith('@'))
def test_global_variable_list_suggestions(self):
global_vars = [name for name in _VariableStash.global_variables]
self._test_global_variable(global_vars[0])
self._test_global_variable(global_vars[5])
self._test_global_variable(global_vars[-1])
def _test_global_variable(self, variable, expected=None):
assert_equals(
expected or variable, self.ns.get_suggestions_for(self.kw, variable)[0].name)
def test_resource_with_variable_in_path(self):
sugs = self.ns.get_suggestions_for(self.kw, 'Resu UK')
self._assert_import_kws(sugs, 'resu.txt')
def test_scalar_variable_suggestion(self):
scalar_vars = self.ns.get_suggestions_for(self.kw, '$')
assert_true(len(scalar_vars) > 0)
assert_true(
len(self.ns.get_suggestions_for(self.kw, '${')) == len(scalar_vars))
sug = self.ns.get_suggestions_for(self.kw, '${lib')
assert_true(sug[0].name == LIB_NAME_VARIABLE)
def test_list_variable_suggestion(self):
list_vars = self.ns.get_suggestions_for(self.kw, '@')
assert_true(len(list_vars) > 0)
assert_true(
len(self.ns.get_suggestions_for(self.kw, '@{')) == len(list_vars))
def test_dict_variable_suggestion(self):
dict_vars = self.ns.get_suggestions_for(self.kw, '&')
assert_true(len(dict_vars) > 0)
assert_true(
len(self.ns.get_suggestions_for(self.kw, '&{')) == len(dict_vars))
def test_variable_suggestions_without_varwrapping(self):
self._test_global_variable('space', '${SPACE}')
self._test_global_variable('EMP', '${EMPTY}')
def test_vars_from_file(self):
sugs = self.ns.get_suggestions_for(self._get_controller(TESTCASEFILE_WITH_EVERYTHING).keywords[0],
'${var_from_file')
assert_true(len(sugs) > 0)
def _get_controller(self, source):
return DataController(TestCaseFile(source=source).populate(), None)
def test_library_arguments_are_resolved(self):
sugs = self.ns.get_suggestions_for(self._get_controller(TESTCASEFILE_WITH_EVERYTHING).keywords[0],
'Get ')
assert_true(len(sugs) > 0)
for item in sugs:
if item.name == 'Get Mandatory':
return
raise AssertionError('Get mandatory not found')
def test_vars_from_path_resource_file(self):
sugs = self.ns.get_suggestions_for(self._get_controller(TESTCASEFILE_WITH_EVERYTHING).keywords[0],
'${Path RESOURCE var')
assert_true(len(sugs) > 0)
def test_variable_file_arguments_are_resolved(self):
sugs = self.ns.get_suggestions_for(self._get_controller(TESTCASEFILE_WITH_EVERYTHING).keywords[0],
'${dyn ')
assert_true(len(sugs) > 0)
def test_variable_file_variables_are_available_in_resource_imports(self):
sugs = self.ns.get_suggestions_for(self._get_controller(TESTCASEFILE_WITH_RESOURCES_WITH_VARIABLES_FROM_VARIABLE_FILE).tests[0],
'from resource with variable in pa')
self._assert_import_kws(sugs, 'res.txt')
def test_vars_from_keyword_arguments(self):
sugs = self.ns.get_suggestions_for(self.kw, '${keyword argu')
assert_equals(len(sugs), 2)
sugs = self.ns.get_suggestions_for(
self.kw, '${keyword argument with defau')
assert_equals(len(sugs), 1)
self._check_source(
self.kw, '${keyword argument with defau', ArgumentInfo.SOURCE)
def test_argument_is_superior_to_variable_from_variable_table(self):
sugs = self.ns.get_suggestions_for(self.kw, COLLIDING_ARGUMENT[0:4])
assert_true(any(True for s in sugs if s.source == ArgumentInfo.SOURCE))
def test_keyword_arguments_are_suggested_first(self):
sugs = self.ns.get_suggestions_for(self.kw, '')
self._assert_import_kws(sugs[:2], ArgumentInfo.SOURCE)
def test_suggestions_for_datafile(self):
sugs = self.ns.get_suggestions_for(self.tcf_ctrl, 'Execute Manual')
self._assert_import_kws(sugs, 'Dialogs')
sugs = self.ns.get_suggestions_for(self.tcf_ctrl, '${libna')
assert_true(len(sugs) == 1)
def test_variable_sources(self):
everything_tcf = self._get_controller(TESTCASEFILE_WITH_EVERYTHING)
self._check_source(everything_tcf, '${arg}', 'everything.html')
self._check_source(everything_tcf, '@{list}', 'everything.html')
self._check_source(everything_tcf, '${dynamic var}', 'dynamic_varz.py')
self._check_source(
everything_tcf, '${OPERATING SYSTEM}', 'another_resource.html')
def test_relative_imports(self):
relative_tcf = self._get_controller(RELATIVE_IMPORTS)
self._check_source(relative_tcf, 'local', 'local')
def _check_source(self, controller, name, source):
sugs = self.ns.get_suggestions_for(controller, name)
assert_equals(len(sugs), 1)
assert_equals(sugs[0].source, source)
def _assert_import_kws(self, sugs, source):
assert_true(len(sugs) > 0)
for s in sugs:
assert_true(s.source.endswith(source),
'%s does not end with %s' % (s.source, source))
def test_reset(self):
sugs = self.ns.get_suggestions_for(self.kw, 'generate random')
sugs2 = self.ns.get_suggestions_for(self.kw, 'generate random')
assert_true(sugs[0] is sugs2[0])
self.ns.reset_resource_and_library_cache()
sugs3 = self.ns.get_suggestions_for(self.kw, 'generate random')
assert_false(sugs[0] is sugs3[0])
class TestKeywordSearch(_DataFileTest):
def test_is_library_keyword(self):
assert_true(self.ns.is_library_keyword(self.tcf, 'Should Be Equal'))
assert_false(self.ns.is_library_keyword(self.tcf, 'kameli'))
assert_false(self.ns.is_library_keyword(
self.tcf, 'UK From Resource from Resource with Variable'))
def test_is_library_keyword_longname(self):
assert_true(
self.ns.is_library_keyword(self.tcf, 'Builtin.Should Be Equal'))
def test_is_library_keyword_longname_with_alias(self):
assert_true(
self.ns.is_library_keyword(self.tcf, TELNET_LIB_ALIAS+'.LOGIN'))
def test_find_default_keywords(self):
all_kws = self.ns.get_all_keywords([])
assert_is_not_none(all_kws)
self.assert_in_keywords(all_kws, 'Should Be Equal')
def test_find_suite_keywords(self):
everything_tcf = TestCaseFile(
source=TESTCASEFILE_WITH_EVERYTHING).populate()
all_kws = self.ns.get_all_keywords([self.tcf, everything_tcf])
self.assert_in_keywords(all_kws, 'Should be in keywords Uk',
'Copy List',
'Uk From Variable Resource')
self.assert_in_keywords(all_kws, 'My Test Setup',
'My Suite Teardown')
def test_resource_kws_only_once(self):
directory = TestDataDirectory(source=SIMPLE_TEST_SUITE_PATH).populate()
all_kws = self.ns.get_all_keywords(directory.children)
self._check_resource_keyword_only_once(all_kws)
def test_resource_kws_only_once_through_project(self):
project = construct_project(SIMPLE_TEST_SUITE_PATH)
all_kws = project.get_all_keywords()
project.close()
self._check_resource_keyword_only_once(all_kws)
def _check_resource_keyword_only_once(self, all_kws):
results = [(kw.name, kw.source)
for kw in all_kws if kw.name == "Only From Resource"]
assert_equals(len(results), 1)
assert_equals(
results[0], (u'Only From Resource', u'testdata_resource.txt'))
def test_find_user_keyword_name_normalized(self):
assert_is_not_none(self.ns.find_user_keyword(
self.tcf, 'UK Fromresource from rESOURCE with variaBLE'))
assert_is_none(self.ns.find_user_keyword(self.tcf, 'Copy List'))
def test_is_user_keyword(self):
assert_true(self.ns.is_user_keyword(
self.tcf, 'UKFromResource from ResourcewithVariable'))
assert_false(self.ns.is_user_keyword(self.tcf, 'hevoinen'))
assert_false(self.ns.is_user_keyword(self.tcf, 'Should Be Equal'))
def test_is_user_keyword_in_resource_file(self):
everything_tcf = TestCaseFile(
source=TESTCASEFILE_WITH_EVERYTHING).populate()
assert_is_not_none(
self.ns.find_user_keyword(everything_tcf, 'Duplicate UK'))
assert_true(self.ns.is_user_keyword(everything_tcf, 'Duplicate UK'))
assert_is_not_none(
self.ns.find_user_keyword(everything_tcf, 'Another Resource UK'))
assert_true(
self.ns.is_user_keyword(everything_tcf, 'Another Resource UK'))
def test_given_when_then_and_aliases(self):
assert_is_not_none(self.ns.find_user_keyword(
self.tcf, ' Given UK Fromresource from rESOURCE with variaBLE'))
assert_is_not_none(self.ns.find_user_keyword(
self.tcf, 'when UK Fromresource from rESOURCE with variaBLE'))
assert_is_not_none(self.ns.find_user_keyword(
self.tcf, ' then UK Fromresource from rESOURCE with variaBLE'))
assert_is_not_none(self.ns.find_user_keyword(
self.tcf, 'AND UK Fromresource from rESOURCE with variaBLE'))
assert_is_none(self.ns.find_user_keyword(
self.tcf, 'given and UK Fromresource from rESOURCE with variaBLE'))
def assert_in_keywords(self, keywords, *kw_names):
for kw_name in kw_names:
if not self._in_keywords(keywords, kw_name):
raise AssertionError(kw_name)
def _in_keywords(self, keywords, kw_name):
return any([kw_name.lower() == kw.name.lower() for kw in keywords])
class TestVariableStash(unittest.TestCase):
def _variable_stash_contains(self, name, vars):
assert_true('${{{0}}}'.format(name) in [v.name for v in vars])
def test_variable_resolving(self):
vars = _VariableStash()
var_table = VariableTable(ParentMock())
var_table.add('${var1}', 'foo')
var_table.add('${var2}', 'bar')
vars.set_from_variable_table(var_table)
result = vars.replace_variables('hoo${var1}hii${var2}huu')
assert_equals('hoofoohiibarhuu', result)
def test_list_variable_index_resolving(self):
vars = _VariableStash()
var_table = VariableTable(ParentMock())
var_table.add('@{var}', ['foo', 'bar'])
vars.set_from_variable_table(var_table)
assert_equals('Hi, foo!', vars.replace_variables('Hi, @{var}[0]!'))
def test_dict_variable_key_resolving(self):
vars = _VariableStash()
var_table = VariableTable(ParentMock())
var_table.add('&{var}', ['foo=bar'])
vars.set_from_variable_table(var_table)
assert_equals('Hi, bar!', vars.replace_variables('Hi, &{var}[foo]!'))
def test_variable_resolving_with_unresolvable_value(self):
vars = _VariableStash()
var_table = VariableTable(ParentMock())
var_table.add('${var1}', '${unresolvable variable}')
var_table.add('${var2}', 'bar')
vars.set_from_variable_table(var_table)
self._variable_stash_contains('var1', vars)
self._variable_stash_contains('var2', vars)
def test_has_default_values(self):
vars = _VariableStash()
self._variable_stash_contains('SPACE', vars)
self._variable_stash_contains('PREV_TEST_MESSAGE', vars)
def test_global_variable_trues_value_is_replaced_with_true(self):
assert_equals(_VariableStash().replace_variables('${True}'), True)
def test_global_variable_falses_value_is_replaced_with_false(self):
assert_equals(_VariableStash().replace_variables('${False}'), False)
def test_global_variable_nones_value_is_replaced_with_none(self):
assert_equals(_VariableStash().replace_variables('${None}'), None)
def test_global_variable_nulls_value_is_replaced_with_none(self):
assert_equals(_VariableStash().replace_variables('${null}'), None)
class TestResourceGetter(_DataFileTest):
def test_resource_getter(self):
resources = self.ns.get_resources(self.tcf)
assert_equals(len(resources), 8)
paths = []
for res in resources:
normalized = normpath(res.source)
assert_false(normalized in paths)
paths.append(normalized)
class TestResourceCache(_DataFileTest):
def setUp(self):
self._res_cache = self.ns._resource_factory
def test_file_read_only_once(self):
imp = Resource(None, RESOURCE_PATH)
first = self._res_cache.get_resource(imp.directory, imp.name)
second = self._res_cache.get_resource(imp.directory, imp.name)
assert_true(first is second)
def test_file_with_absolute_path(self):
imp = Resource(ParentMock(), RESOURCE_PATH)
assert_true(self._res_cache.get_resource(imp.directory, imp.name))
def test_file_with_invalid_path(self):
imp = Resource(ParentMock(), '${kumikameli}')
assert_is_none(self._res_cache.get_resource(imp.directory, imp.name))
if IS_WINDOWS:
        def test_case_insensitive_filenames(self):
imp = Resource(None, RESOURCE_PATH)
first = self._res_cache.get_resource(
imp.directory, imp.name.lower())
second = self._res_cache.get_resource(
imp.directory, imp.name.upper())
assert_true(first is second)
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import print_function
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import (
get_all_templates, write_instrumentation_reports,
)
import os
import subprocess
import sys
import time
import traceback
import unittest
def slow(slowness_reason):
# type: (str) -> Callable[[Callable], Callable]
    '''
    This is a decorator that annotates a test as being "known
    to be slow." The decorator sets slowness_reason as an attribute
    of the function. Other code can use this annotation as needed,
    e.g. to exclude these tests in "fast" mode.
    '''
def decorator(f):
# type: (Any) -> Any
f.slowness_reason = slowness_reason
return f
return decorator
def is_known_slow_test(test_method):
# type: (Any) -> bool
return hasattr(test_method, 'slowness_reason')
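# A minimal sketch (illustrative, not part of the original module) showing
# how the annotation is attached and read back; the sample test body is a
# placeholder.
def _slow_annotation_example():
    # type: () -> None
    @slow('exercises the full message send path')
    def sample_test():
        # type: () -> None
        pass
    assert is_known_slow_test(sample_test)
    assert sample_test.slowness_reason == 'exercises the full message send path'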
def full_test_name(test):
# type: (TestCase) -> str
test_module = test.__module__
test_class = test.__class__.__name__
test_method = test._testMethodName
return '%s.%s.%s' % (test_module, test_class, test_method)
def get_test_method(test):
# type: (TestCase) -> Callable[[], None]
return getattr(test, test._testMethodName)
# Each tuple is delay, test_name, slowness_reason
TEST_TIMINGS = [] # type: List[Tuple[float, str, str]]
def report_slow_tests():
# type: () -> None
timings = sorted(TEST_TIMINGS, reverse=True)
print('SLOWNESS REPORT')
print(' delay test')
print(' ---- ----')
for delay, test_name, slowness_reason in timings[:15]:
if not slowness_reason:
slowness_reason = 'UNKNOWN WHY SLOW, please investigate'
print(' %0.3f %s\n %s\n' % (delay, test_name, slowness_reason))
print('...')
for delay, test_name, slowness_reason in timings[100:]:
if slowness_reason:
print(' %.3f %s is not that slow' % (delay, test_name))
print(' consider removing @slow decorator')
print(' This may no longer be true: %s' % (slowness_reason,))
def enforce_timely_test_completion(test_method, test_name, delay):
# type: (Any, str, float) -> None
if hasattr(test_method, 'slowness_reason'):
max_delay = 1.1 # seconds
else:
max_delay = 0.4 # seconds
if delay > max_delay:
print(' ** Test is TOO slow: %s (%.3f s)' % (test_name, delay))
def fast_tests_only():
# type: () -> bool
return "FAST_TESTS_ONLY" in os.environ
def run_test(test):
# type: (TestCase) -> bool
failed = False
test_method = get_test_method(test)
if fast_tests_only() and is_known_slow_test(test_method):
return failed
test_name = full_test_name(test)
bounce_key_prefix_for_testing(test_name)
print('Running', test_name)
if not hasattr(test, "_pre_setup"):
# test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
if test_name.startswith(import_failure_prefix):
actual_test_name = test_name[len(import_failure_prefix):]
print()
print("Actual test to be run is %s, but import failed." % (actual_test_name,))
print("Importing test module directly to generate clearer traceback:")
try:
command = ["python", "-c", "import %s" % (actual_test_name,)]
print("Import test command: `%s`" % (' '.join(command),))
subprocess.check_call(command)
except subprocess.CalledProcessError:
print("If that traceback is confusing, try doing the import inside `./manage.py shell`")
print()
return True
print("Import unexpectedly succeeded! Something is wrong.")
print("Try running `import %s` inside `./manage.py shell`" % (actual_test_name,))
print("If that works, you may have introduced an import cycle.")
return True
else:
print("Test doesn't have _pre_setup; something is wrong.")
print("Here's a debugger. Good luck!")
import pdb; pdb.set_trace()
test._pre_setup()
start_time = time.time()
test.setUp()
try:
test_method()
except unittest.SkipTest as e:
print('Skipped:', e)
except Exception:
failed = True
traceback.print_exc()
test.tearDown()
delay = time.time() - start_time
enforce_timely_test_completion(test_method, test_name, delay)
slowness_reason = getattr(test_method, 'slowness_reason', '')
TEST_TIMINGS.append((delay, test_name, slowness_reason))
test._post_teardown()
return failed
class Runner(DiscoverRunner):
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
DiscoverRunner.__init__(self, *args, **kwargs)
# `templates_rendered` holds templates which were rendered
# in proper logical tests.
self.templates_rendered = set() # type: Set[str]
# `shallow_tested_templates` holds templates which were rendered
# in `zerver.tests.test_templates`.
self.shallow_tested_templates = set() # type: Set[str]
template_rendered.connect(self.on_template_rendered)
def on_template_rendered(self, sender, context, **kwargs):
# type: (Any, Dict[str, Any], **Any) -> None
if hasattr(sender, 'template'):
template_name = sender.template.name
if template_name not in self.templates_rendered:
if context.get('shallow_tested'):
self.shallow_tested_templates.add(template_name)
else:
self.templates_rendered.add(template_name)
self.shallow_tested_templates.discard(template_name)
def get_shallow_tested_templates(self):
# type: () -> Set[str]
return self.shallow_tested_templates
def run_suite(self, suite, fatal_errors=True):
# type: (Iterable[TestCase], bool) -> bool
failed = False
for test in suite:
# The attributes __unittest_skip__ and __unittest_skip_why__ are undocumented
if hasattr(test, '__unittest_skip__') and test.__unittest_skip__:
print('Skipping', full_test_name(test), "(%s)" % (test.__unittest_skip_why__,))
elif run_test(test):
failed = True
if fatal_errors:
return failed
return failed
def run_tests(self, test_labels, extra_tests=None, **kwargs):
# type: (List[str], Optional[List[TestCase]], **Any) -> bool
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
except AttributeError:
traceback.print_exc()
print()
print(" This is often caused by a test module/class/function that doesn't exist or ")
print(" import properly. You can usually debug in a `manage.py shell` via e.g. ")
print(" import zerver.tests.test_messages")
print(" from zerver.tests.test_messages import StreamMessagesTest")
print(" StreamMessagesTest.test_message_to_stream")
print()
sys.exit(1)
# We have to do the next line to avoid flaky scenarios where we
# run a single test and getting an SA connection causes data from
# a Django connection to be rolled back mid-test.
get_sqlalchemy_connection()
failed = self.run_suite(suite, fatal_errors=kwargs.get('fatal_errors'))
self.teardown_test_environment()
if not failed:
write_instrumentation_reports()
return failed
|
|
from yaml import load
from yaml import YAMLError
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
SETTINGS_FILE = 'settings.yaml'
SETTINGS_STRUCT = {
'client_config_backend': {
'type': str,
'required': True,
'default': 'file',
'dependency': [
{
'value': 'file',
'attribute': ['client_config_file']
},
{
'value': 'settings',
'attribute': ['client_config']
},
{
'value': 'service',
'attribute': ['service_config']
}
]
},
'save_credentials': {
'type': bool,
'required': True,
'default': False,
'dependency': [
{
'value': True,
'attribute': ['save_credentials_backend']
}
]
},
'get_refresh_token': {
'type': bool,
'required': False,
'default': False
},
'client_config_file': {
'type': str,
'required': False,
'default': 'client_secrets.json'
},
'save_credentials_backend': {
'type': str,
'required': False,
'dependency': [
{
'value': 'file',
'attribute': ['save_credentials_file']
}
]
},
'client_config': {
'type': dict,
'required': False,
'struct': {
'client_id': {
'type': str,
'required': True
},
'client_secret': {
'type': str,
'required': True
},
'auth_uri': {
'type': str,
'required': True,
'default': 'https://accounts.google.com/o/oauth2/auth'
},
'token_uri': {
'type': str,
'required': True,
'default': 'https://accounts.google.com/o/oauth2/token'
},
'redirect_uri': {
'type': str,
'required': True,
'default': 'urn:ietf:wg:oauth:2.0:oob'
},
'revoke_uri': {
'type': str,
'required': True,
'default': None
}
}
},
'service_config': {
'type': dict,
'required': False,
'struct': {
'client_user_email': {
'type': str,
'required': True,
'default': None
},
'client_service_email': {
'type': str,
'required': True
},
'client_pkcs12_file_path': {
'type': str,
'required': True
}
}
},
'oauth_scope': {
'type': list,
'required': True,
'struct': str,
'default': ['https://www.googleapis.com/auth/drive']
},
'save_credentials_file': {
'type': str,
'required': False,
}
}
class SettingsError(IOError):
"""Error while loading/saving settings"""
class InvalidConfigError(IOError):
"""Error trying to read client configuration."""
def LoadSettingsFile(filename=SETTINGS_FILE):
"""Loads settings file in yaml format given file name.
:param filename: path for settings file. 'settings.yaml' by default.
:type filename: str.
:raises: SettingsError
"""
try:
stream = open(filename, 'r')
data = load(stream, Loader=Loader)
except (YAMLError, IOError) as e:
raise SettingsError(e)
return data
def ValidateSettings(data):
"""Validates if current settings is valid.
:param data: dictionary containing all settings.
:type data: dict.
:raises: InvalidConfigError
"""
_ValidateSettingsStruct(data, SETTINGS_STRUCT)
def _ValidateSettingsStruct(data, struct):
"""Validates if provided data fits provided structure.
:param data: dictionary containing settings.
:type data: dict.
:param struct: dictionary containing structure information of settings.
:type struct: dict.
:raises: InvalidConfigError
"""
# Validate required elements of the setting.
for key in struct:
if struct[key]['required']:
_ValidateSettingsElement(data, struct, key)
def _ValidateSettingsElement(data, struct, key):
"""Validates if provided element of settings data fits provided structure.
:param data: dictionary containing settings.
:type data: dict.
:param struct: dictionary containing structure information of settings.
:type struct: dict.
:param key: key of the settings element to validate.
:type key: str.
:raises: InvalidConfigError
"""
# Check if data exists. If not, check if default value exists.
value = data.get(key)
data_type = struct[key]['type']
if value is None:
try:
default = struct[key]['default']
except KeyError:
raise InvalidConfigError('Missing required setting %s' % key)
else:
data[key] = default
    # If data exists, check the type of the data.
elif type(value) is not data_type:
raise InvalidConfigError('Setting %s should be type %s' % (key, data_type))
# If type of this data is dict, check if structure of the data is valid.
if data_type is dict:
_ValidateSettingsStruct(data[key], struct[key]['struct'])
    # If type of this data is list, check if all values in the list are valid.
elif data_type is list:
for element in data[key]:
if type(element) is not struct[key]['struct']:
raise InvalidConfigError('Setting %s should be list of %s' %
(key, struct[key]['struct']))
# Check dependency of this attribute.
dependencies = struct[key].get('dependency')
if dependencies:
for dependency in dependencies:
if value == dependency['value']:
for reqkey in dependency['attribute']:
_ValidateSettingsElement(data, struct, reqkey)
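# Minimal usage sketch (illustrative only; assumes a local settings.yaml):
if __name__ == '__main__':
    try:
        settings = LoadSettingsFile()
        ValidateSettings(settings)
        print('settings.yaml is valid')
    except (SettingsError, InvalidConfigError) as error:
        print('invalid settings: {0}'.format(error))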
|
|
import os
import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import cante100
from tests.test_utils import DEFAULT_DATA_HOME
TEST_DATA_HOME = "tests/resources/mir_datasets/cante100"
def test_track():
default_trackid = "008"
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
expected_attributes = {
"artist": "Toronjo",
"duration": 179.0,
"audio_path": "tests/resources/mir_datasets/cante100/cante100audio/008_PacoToronjo_"
+ "Fandangos.mp3",
"f0_path": "tests/resources/mir_datasets/cante100/cante100midi_f0/008_PacoToronjo_"
+ "Fandangos.f0.csv",
"identifier": "4eebe839-82bb-426e-914d-7c4525dd9dad",
"notes_path": "tests/resources/mir_datasets/cante100/cante100_automaticTranscription/008_PacoToronjo_"
+ "Fandangos.notes.csv",
"release": "Atlas del cante flamenco",
"spectrogram_path": "tests/resources/mir_datasets/cante100/cante100_spectrum/008_PacoToronjo_"
+ "Fandangos.spectrum.csv",
"title": "Huelva Como Capital",
"track_id": "008",
}
expected_property_types = {
"melody": annotations.F0Data,
"notes": annotations.NoteData,
"audio": tuple,
"spectrogram": np.ndarray,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_to_jams():
default_trackid = "008"
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
jam = track.to_jams()
# Validate cante100 jam schema
assert jam.validate()
# Validate melody
melody = jam.search(namespace="pitch_contour")[0]["data"]
assert [note.time for note in melody] == [
0.023219954,
0.026122448,
0.029024942,
0.031927436,
0.034829931,
0.037732425,
]
assert [note.duration for note in melody] == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
assert [note.value for note in melody] == [
{"index": 0, "frequency": 0.0, "voiced": False},
{"index": 0, "frequency": 137.0, "voiced": True},
{"index": 0, "frequency": 220.34, "voiced": True},
{"index": 0, "frequency": 400.0, "voiced": True},
{"index": 0, "frequency": 110.0, "voiced": False},
{"index": 0, "frequency": 110.0, "voiced": False},
]
assert [note.confidence for note in melody] == [None, None, None, None, None, None]
    # Validate note transcription
notes = jam.search(namespace="note_hz")[0]["data"]
assert [note.time for note in notes] == [
25.7625,
26.1457,
37.3319,
37.5612,
37.7876,
44.8755,
]
assert [note.duration for note in notes] == [
0.3453969999999984,
0.3947390000000013,
0.22349200000000025,
0.20317500000000166,
2.400359999999999,
0.2873469999999969,
]
assert [note.value for note in notes] == [
207.65234878997256,
207.65234878997256,
311.1269837220809,
369.9944227116344,
415.3046975799452,
391.9954359817492,
]
assert [note.confidence for note in notes] == [None, None, None, None, None, None]
def test_load_melody():
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track("008")
f0_path = track.f0_path
f0_data = cante100.load_melody(f0_path)
# check types
assert type(f0_data) == annotations.F0Data
assert type(f0_data.times) is np.ndarray
assert type(f0_data.frequencies) is np.ndarray
assert type(f0_data.voicing) is np.ndarray
# check values
assert np.array_equal(
f0_data.times,
np.array(
[
0.023219954,
0.026122448,
0.029024942,
0.031927436,
0.034829931,
0.037732425,
]
),
)
assert np.array_equal(
f0_data.frequencies, np.array([0.0, 137.0, 220.34, 400.0, 110.0, 110.0])
)
assert np.array_equal(f0_data.voicing, np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0]))
def test_load_notes():
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track("008")
notes_path = track.notes_path
notes_data = cante100.load_notes(notes_path)
# check types
assert type(notes_data) == annotations.NoteData
assert type(notes_data.intervals) is np.ndarray
assert type(notes_data.pitches) is np.ndarray
assert type(notes_data.confidence) is np.ndarray
# check values
assert np.array_equal(
notes_data.intervals[:, 0],
np.array([25.7625, 26.1457, 37.3319, 37.5612, 37.7876, 44.8755]),
)
assert np.array_equal(
notes_data.intervals[:, 1],
np.array(
[
26.107896999999998,
26.540439000000003,
37.555392,
37.764375,
40.18796,
45.162847,
]
),
)
assert np.array_equal(
notes_data.pitches,
np.array(
[
207.65234878997256,
207.65234878997256,
311.1269837220809,
369.9944227116344,
415.3046975799452,
391.9954359817492,
]
),
)
assert np.array_equal(
notes_data.confidence, np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
)
def test_load_spectrum():
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track("008")
spectrogram_path = track.spectrogram_path
spectrogram = cante100.load_spectrogram(spectrogram_path)
assert spectrogram.shape[0] == 5
assert spectrogram.shape[1] == 514
assert type(spectrogram) is np.ndarray
    assert isinstance(spectrogram[0][0], float)
def test_load_audio():
dataset = cante100.Dataset(TEST_DATA_HOME)
track = dataset.track("008")
audio_path = track.audio_path
audio, sr = cante100.load_audio(audio_path)
assert sr == 22050
assert audio.shape[0] == 2 # Check audio is stereo
# assert audio.shape[1] == 3957696 # Check audio length
assert type(audio) is np.ndarray
def test_metadata():
data_home = "tests/resources/mir_datasets/cante100"
dataset = cante100.Dataset(data_home)
metadata = dataset._metadata
assert metadata["008"] == {
"musicBrainzID": "4eebe839-82bb-426e-914d-7c4525dd9dad",
"artist": "Toronjo",
"title": "Huelva Como Capital",
"release": "Atlas del cante flamenco",
"duration": 179,
}
|
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schemas for BigQuery tables / queries."""
from six.moves import collections_abc
from google.cloud.bigquery_v2 import types
_STRUCT_TYPES = ("RECORD", "STRUCT")
# SQL types reference:
# https://cloud.google.com/bigquery/data-types#legacy_sql_data_types
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
LEGACY_TO_STANDARD_TYPES = {
"STRING": types.StandardSqlDataType.STRING,
"BYTES": types.StandardSqlDataType.BYTES,
"INTEGER": types.StandardSqlDataType.INT64,
"INT64": types.StandardSqlDataType.INT64,
"FLOAT": types.StandardSqlDataType.FLOAT64,
"FLOAT64": types.StandardSqlDataType.FLOAT64,
"NUMERIC": types.StandardSqlDataType.NUMERIC,
"BOOLEAN": types.StandardSqlDataType.BOOL,
"BOOL": types.StandardSqlDataType.BOOL,
"GEOGRAPHY": types.StandardSqlDataType.GEOGRAPHY,
"RECORD": types.StandardSqlDataType.STRUCT,
"STRUCT": types.StandardSqlDataType.STRUCT,
"TIMESTAMP": types.StandardSqlDataType.TIMESTAMP,
"DATE": types.StandardSqlDataType.DATE,
"TIME": types.StandardSqlDataType.TIME,
"DATETIME": types.StandardSqlDataType.DATETIME,
# no direct conversion from ARRAY, the latter is represented by mode="REPEATED"
}
"""String names of the legacy SQL types to integer codes of Standard SQL types."""
class SchemaField(object):
"""Describe a single field within a table schema.
Args:
name (str): the name of the field.
field_type (str): the type of the field. See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.type
mode (str): the mode of the field. See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.mode
description (Optional[str]): description for the field.
fields (Tuple[google.cloud.bigquery.schema.SchemaField]):
subfields (requires ``field_type`` of 'RECORD').
"""
def __init__(self, name, field_type, mode="NULLABLE", description=None, fields=()):
self._name = name
self._field_type = field_type
self._mode = mode
self._description = description
self._fields = tuple(fields)
@classmethod
def from_api_repr(cls, api_repr):
"""Return a ``SchemaField`` object deserialized from a dictionary.
Args:
api_repr (Mapping[str, str]): The serialized representation
of the SchemaField, such as what is output by
:meth:`to_api_repr`.
Returns:
            google.cloud.bigquery.schema.SchemaField: The ``SchemaField`` object.
"""
# Handle optional properties with default values
mode = api_repr.get("mode", "NULLABLE")
description = api_repr.get("description")
fields = api_repr.get("fields", ())
return cls(
field_type=api_repr["type"].upper(),
fields=[cls.from_api_repr(f) for f in fields],
mode=mode.upper(),
description=description,
name=api_repr["name"],
)
@property
def name(self):
"""str: The name of the field."""
return self._name
@property
def field_type(self):
"""str: The type of the field.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.type
"""
return self._field_type
@property
def mode(self):
"""str: The mode of the field.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.mode
"""
return self._mode
@property
def is_nullable(self):
"""bool: whether 'mode' is 'nullable'."""
return self._mode == "NULLABLE"
@property
def description(self):
"""Optional[str]: description for the field."""
return self._description
@property
def fields(self):
"""tuple: Subfields contained in this field.
        Must be empty if ``field_type`` is not 'RECORD'.
"""
return self._fields
def to_api_repr(self):
"""Return a dictionary representing this schema field.
Returns:
Dict: A dictionary representing the SchemaField in a serialized form.
"""
# Put together the basic representation. See http://bit.ly/2hOAT5u.
answer = {
"mode": self.mode.upper(),
"name": self.name,
"type": self.field_type.upper(),
"description": self.description,
}
        # If this is a RECORD type, then sub-fields are also included,
        # so add them to the serialized representation.
if self.field_type.upper() in _STRUCT_TYPES:
answer["fields"] = [f.to_api_repr() for f in self.fields]
# Done; return the serialized dictionary.
return answer
def _key(self):
"""A tuple key that uniquely describes this field.
Used to compute this instance's hashcode and evaluate equality.
Returns:
Tuple: The contents of this :class:`~google.cloud.bigquery.schema.SchemaField`.
"""
return (
self._name,
self._field_type.upper(),
self._mode.upper(),
self._description,
self._fields,
)
def to_standard_sql(self):
"""Return the field as the standard SQL field representation object.
Returns:
An instance of :class:`~google.cloud.bigquery_v2.types.StandardSqlField`.
"""
sql_type = types.StandardSqlDataType()
if self.mode == "REPEATED":
sql_type.type_kind = types.StandardSqlDataType.ARRAY
else:
sql_type.type_kind = LEGACY_TO_STANDARD_TYPES.get(
self.field_type, types.StandardSqlDataType.TYPE_KIND_UNSPECIFIED
)
if sql_type.type_kind == types.StandardSqlDataType.ARRAY: # noqa: E721
array_element_type = LEGACY_TO_STANDARD_TYPES.get(
self.field_type, types.StandardSqlDataType.TYPE_KIND_UNSPECIFIED
)
sql_type.array_element_type.type_kind = array_element_type
# ARRAY cannot directly contain other arrays, only scalar types and STRUCTs
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#array-type
if array_element_type == types.StandardSqlDataType.STRUCT: # noqa: E721
sql_type.array_element_type.struct_type.fields.extend(
field.to_standard_sql() for field in self.fields
)
elif sql_type.type_kind == types.StandardSqlDataType.STRUCT: # noqa: E721
sql_type.struct_type.fields.extend(
field.to_standard_sql() for field in self.fields
)
return types.StandardSqlField(name=self.name, type=sql_type)
def __eq__(self, other):
if not isinstance(other, SchemaField):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __repr__(self):
return "SchemaField{}".format(self._key())
def _parse_schema_resource(info):
"""Parse a resource fragment into a schema field.
Args:
        info (Mapping[str, Dict]): should contain a "fields" key to be parsed
Returns:
        Sequence[google.cloud.bigquery.schema.SchemaField]:
            A list of parsed fields, or an empty sequence if no "fields" key is found.
"""
if "fields" not in info:
return ()
schema = []
for r_field in info["fields"]:
name = r_field["name"]
field_type = r_field["type"]
mode = r_field.get("mode", "NULLABLE")
description = r_field.get("description")
sub_fields = _parse_schema_resource(r_field)
schema.append(SchemaField(name, field_type, mode, description, sub_fields))
return schema
def _build_schema_resource(fields):
"""Generate a resource fragment for a schema.
Args:
        fields (Sequence[google.cloud.bigquery.schema.SchemaField]): schema to be dumped.
Returns:
Sequence[Dict]: Mappings describing the schema of the supplied fields.
"""
return [field.to_api_repr() for field in fields]
def _to_schema_fields(schema):
"""Coerce `schema` to a list of schema field instances.
Args:
schema(Sequence[Union[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
Mapping[str, Any] \
]]):
Table schema to convert. If some items are passed as mappings,
their content must be compatible with
:meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
Returns:
Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`]
Raises:
        ValueError: If ``schema`` is not a sequence, or if any item in the
sequence is not a :class:`~google.cloud.bigquery.schema.SchemaField`
instance or a compatible mapping representation of the field.
"""
for field in schema:
if not isinstance(field, (SchemaField, collections_abc.Mapping)):
raise ValueError(
"Schema items must either be fields or compatible "
"mapping representations."
)
return [
field if isinstance(field, SchemaField) else SchemaField.from_api_repr(field)
for field in schema
]
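# A short usage sketch (illustrative only; field names are hypothetical):
# build a nested schema, round-trip it through the API representation, and
# inspect the standard SQL form of a repeated field.
if __name__ == "__main__":
    address = SchemaField(
        "address",
        "RECORD",
        fields=(SchemaField("city", "STRING"), SchemaField("zip", "STRING")),
    )
    resource = _build_schema_resource([address])
    assert _to_schema_fields(resource) == [address]
    tags = SchemaField("tags", "STRING", mode="REPEATED")
    # REPEATED mode maps to ARRAY; the element type comes from field_type.
    standard = tags.to_standard_sql()
    assert standard.type.type_kind == types.StandardSqlDataType.ARRAY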
|
|
#!/usr/bin/env python
"""
Ansible Dynamic Inventory Script for Ubuntu MAAS.
This script fetches hosts data for Ansible from Ubuntu MAAS, using Tags to
identify groups and roles. It is expected that the script will be copied to
Tower within the new Inventory Scripts dialog offered within the interface,
where it will be passed the `--list` argument to invoke the dynamic inventory
process.
It is also possible to run as a standalone script or a replacement for the
Ansible `hosts` file.
See https://docs.ubuntu.com/maas/2.1/en/api for API details
:copyright: Internet Solutions (Pty) Ltd, 2015
:author: Paul Stevens <mailto:paul.stevens@is.co.za>
:copyright: Martijn van der kleijn, 2017
:author: Martijn van der Kleijn <mailto:martijn.niji@gmail.com>
:license: Released under the Apache 2.0 License. See LICENSE for details.
:version: 2.0.1
:date: 11 May 2017
"""
import argparse
import json
import os
import sys
import uuid
import oauth.oauth as oauth
import requests
class Inventory:
"""Provide several convenience methods to retrieve information from MAAS API."""
def __init__(self):
"""Check for precense of mandatory environment variables and route commands."""
self.supported = '2.0'
self.apikeydocs = 'https://docs.ubuntu.com/maas/2.1/en/manage-cli#log-in-(required)'
self.maas = os.environ.get("MAAS_API_URL", None)
if not self.maas:
sys.exit("MAAS_API_URL environment variable not found. Set this to http<s>://<HOSTNAME or IP>/MAAS/api/{}".format(self.supported))
self.token = os.environ.get("MAAS_API_KEY", None)
if not self.token:
sys.exit("MAAS_API_KEY environment variable not found. See {} for getting a MAAS API KEY".format(self.apikeydocs))
self.args = None
# Parse command line arguments
self.cli_handler()
if self.args.list:
print json.dumps(self.inventory(), sort_keys=True, indent=2)
elif self.args.host:
print json.dumps(self.host(), sort_keys=True, indent=2)
elif self.args.nodes:
print json.dumps(self.nodes(), sort_keys=True, indent=2)
elif self.args.tags:
print json.dumps(self.tags(), sort_keys=True, indent=2)
elif self.args.tag:
print json.dumps(self.tag(), sort_keys=True, indent=2)
elif self.args.supported:
print self.supportedVersion()
else:
sys.exit(1)
def supportedVersion(self):
"""Display MAAS API version supported by this tool."""
return self.supported
def auth(self):
"""Split the user's API key from MAAS into its component parts (Maas UI > Account > MAAS Keys)."""
(consumer_key, key, secret) = self.token.split(':')
# Format an OAuth header
resource_token_string = "oauth_token_secret={}&oauth_token={}".format(secret, key)
resource_token = oauth.OAuthToken.from_string(resource_token_string)
consumer_token = oauth.OAuthConsumer(consumer_key, "")
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
consumer_token, token=resource_token, http_url=self.maas,
parameters={'auth_nonce': uuid.uuid4().get_hex()})
oauth_request.sign_request(
oauth.OAuthSignatureMethod_PLAINTEXT(), consumer_token, resource_token)
headers = oauth_request.to_header()
headers['Accept'] = 'application/json'
return headers
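    # The MAAS API key has the form '<consumer_key>:<key>:<secret>'; the
    # header built above uses PLAINTEXT OAuth signing, which MAAS accepts.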
def host(self):
"""Return data on a single host/node."""
headers = self.auth()
url = "{}/nodes/{}/".format(self.maas.rstrip(), self.args.host)
request = requests.get(url, headers=headers)
return json.loads(request.text)
def tags(self):
"""Fetch a simple list of available tags from MAAS."""
headers = self.auth()
url = "{}/tags/".format(self.maas.rstrip())
request = requests.get(url, headers=headers)
response = json.loads(request.text)
tag_list = [item["name"] for item in response]
return tag_list
def tag(self):
"""Fetch detailed information on a particular tag from MAAS."""
headers = self.auth()
url = "{}/tags/{}/?op=machines".format(self.maas.rstrip(), self.args.tag)
request = requests.get(url, headers=headers)
return json.loads(request.text)
def inventory(self):
"""Look up hosts by tag(s) and zone(s) and return a dict that Ansible will understand as an inventory."""
tags = self.tags()
ansible = {}
for tag in tags:
headers = self.auth()
url = "{}/tags/{}/?op=machines".format(self.maas.rstrip(), tag)
request = requests.get(url, headers=headers)
response = json.loads(request.text)
group_name = tag
hosts = []
for server in response:
if server['status_name'] == 'Deployed':
hosts.append(server['fqdn'])
ansible[group_name] = {
"hosts": hosts,
"vars": {}
}
nodes = self.nodes()
hosts = []
for node in nodes:
zone = node['zone']['name']
if node['node_type_name'] != 'Machine' or node['status_name'] != 'Deployed':
continue
hosts.append(node['fqdn'])
ansible[zone] = {
"hosts": hosts,
"vars": {}
}
# PS 2015-09-03: Create metadata block for Ansible's Dynamic Inventory
# The below code gets a dump of ALL nodes in MAAS and then builds out a _meta JSON attribute.
# node_dump = self.nodes()
# nodes = {
# '_meta': {
# 'hostvars': {}
# }
# }
#
# for node in node_dump:
# if not node['tag_names']:
# pass
# else:
# nodes['_meta']['hostvars'][node['hostname']] = {
# 'mac_address': node['macaddress_set'][0]['mac_address'],
# 'system_id': node['system_id'],
# 'power_type': node['power_type'],
# 'os': node['osystem'],
# 'os_release': node['distro_series']
# }
        # Need to merge the ansible and nodes dicts as a shallow copy, or
        # Ansible chokes on the result and throws an error
result = ansible.copy()
# result.update(nodes)
return result
def nodes(self):
"""Return a list of nodes from the MAAS API."""
headers = self.auth()
url = "%s/nodes/" % self.maas.rstrip()
request = requests.get(url, headers=headers)
response = json.loads(request.text)
return response
def cli_handler(self):
"""Manage command line options and arguments."""
parser = argparse.ArgumentParser(description='Dynamically produce an Ansible inventory from Ubuntu MAAS.', add_help=False)
parser.add_argument('-l', '--list', action='store_true', help='List instances by tag.')
parser.add_argument('-h', '--host', action='store', help='Get variables relating to a specific instance.')
parser.add_argument('-n', '--nodes', action='store_true', help='List all nodes registered under MAAS.')
parser.add_argument('-t', '--tags', action='store_true', help='List all tags registered under MAAS.')
parser.add_argument('--tag', action='store', help='Get details for a specific tag registered under MAAS.')
        parser.add_argument('-s', '--supported', action='store_true', help='List which MAAS API versions are supported.')
parser.add_argument('--help', action='help', help='Show this help message and exit.')
# Be kind and print help when no arguments given.
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
self.args = parser.parse_args()
if __name__ == "__main__":
Inventory()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
create_transaction,
make_conform_to_ctor,
)
from test_framework.messages import (
CTransaction,
FromHex,
ToHex,
msg_block,
msg_tx,
)
from test_framework.p2p import P2PInterface
from test_framework.script import (
OP_1NEGATE,
OP_CHECKLOCKTIMEVERIFY,
OP_DROP,
OP_TRUE,
CScript,
CScriptNum,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal
CLTV_HEIGHT = 1351
def cltv_lock_to_height(node, tx, to_address, amount, height=-1):
'''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
a transaction that spends it.
    This transforms the output script to anyone-can-spend (OP_TRUE) once the
    locktime condition is satisfied.
    The default height of -1 makes the CLTV check fail.
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
'''
height_op = OP_1NEGATE
    if height > 0:
tx.vin[0].nSequence = 0
tx.nLockTime = height
height_op = CScriptNum(height)
tx.vout[0].scriptPubKey = CScript(
[height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
pad_tx(tx)
fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex']
fundtx = FromHex(CTransaction(), fundtx_raw)
fundtx.rehash()
# make spending tx
inputs = [{
"txid": fundtx.hash,
"vout": 0
}]
output = {to_address: amount}
spendtx_raw = node.createrawtransaction(inputs, output)
spendtx = FromHex(CTransaction(), spendtx_raw)
pad_tx(spendtx)
return fundtx, spendtx
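# For a positive height the resulting output script is, schematically:
#   <height> OP_CHECKLOCKTIMEVERIFY OP_DROP OP_TRUE
# so the output becomes anyone-can-spend once the locktime check passes;
# with the default height of -1 the script starts with OP_1NEGATE and the
# CLTV check always fails.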
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
self.coinbase_txids = [self.nodes[0].getblock(
b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info(
"Test that an invalid-according-to-CLTV transaction can still appear in a block")
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=49990000)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49980000)
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(
CLTV_HEIGHT - 1), block_time)
block.nVersion = 3
block.vtx.append(fundtx)
# include the -1 CLTV in block
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
peer.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info(
"Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = 4
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=49990000)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49980000)
# The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is
# valid.
peer.send_and_ping(msg_tx(fundtx))
assert fundtx.hash in self.nodes[0].getrawmempool()
# Mine a block containing the funding transaction
block.vtx.append(fundtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
peer.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# We show that this tx is invalid due to CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False,
'reject-reason': 'non-mandatory-script-verify-flag (Negative locktime)'}],
self.nodes[0].testmempoolaccept(
rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet(
ToHex(spendtx))
# Couldn't complete signature due to CLTV
assert rejectedtx_signed['errors'][0]['error'] == 'Negative locktime'
tip = block.hash
block_time += 1
block = create_block(
block.sha256, create_coinbase(CLTV_HEIGHT + 1), block_time)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), tip)
peer.sync_with_ping()
self.log.info(
"Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[2],
self.nodeaddress, amount=49990000)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49980000, CLTV_HEIGHT)
# make sure sequence is nonfinal and locktime is good
spendtx.vin[0].nSequence = 0xfffffffe
spendtx.nLockTime = CLTV_HEIGHT
# both transactions are fully valid
self.nodes[0].sendrawtransaction(ToHex(fundtx))
self.nodes[0].sendrawtransaction(ToHex(spendtx))
# Modify the transactions in the block to be valid against CLTV
block.vtx.pop(1)
block.vtx.append(fundtx)
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
peer.send_and_ping(msg_block(block))
# This block is now valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
if __name__ == '__main__':
BIP65Test().main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class TagsOperations(object):
"""TagsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2017-05-10".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-05-10"
self.config = config
def delete_value(
self, tag_name, tag_value, custom_headers=None, raw=False, **operation_config):
"""Deletes a tag value.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to delete.
:type tag_value: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'
path_format_arguments = {
'tagName': self._serialize.url("tag_name", tag_name, 'str'),
'tagValue': self._serialize.url("tag_value", tag_value, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
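    # Typical call through the resource management client (illustrative;
    # the tag name and value are placeholders):
    #     resource_client.tags.delete_value('environment', 'staging')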
def create_or_update_value(
self, tag_name, tag_value, custom_headers=None, raw=False, **operation_config):
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to create.
:type tag_value: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TagValue or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.resource.resources.v2017_05_10.models.TagValue or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'
path_format_arguments = {
'tagName': self._serialize.url("tag_name", tag_name, 'str'),
'tagValue': self._serialize.url("tag_value", tag_value, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TagValue', response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, tag_name, custom_headers=None, raw=False, **operation_config):
"""Creates a tag in the subscription.
The tag name can have a maximum of 512 characters and is case
insensitive. Tag names created by Azure have prefixes of microsoft,
azure, or windows. You cannot create tags with one of these prefixes.
:param tag_name: The name of the tag to create.
:type tag_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TagDetails or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.resource.resources.v2017_05_10.models.TagDetails
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/tagNames/{tagName}'
path_format_arguments = {
'tagName': self._serialize.url("tag_name", tag_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TagDetails', response)
if response.status_code == 201:
deserialized = self._deserialize('TagDetails', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, tag_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a tag from the subscription.
You must remove all values from a resource tag before you can delete
it.
:param tag_name: The name of the tag.
:type tag_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/tagNames/{tagName}'
path_format_arguments = {
'tagName': self._serialize.url("tag_name", tag_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the names and values of all resource tags that are defined in a
subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of TagDetails
:rtype:
~azure.mgmt.resource.resources.v2017_05_10.models.TagDetailsPaged[~azure.mgmt.resource.resources.v2017_05_10.models.TagDetails]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/tagNames'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TagDetailsPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TagDetailsPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
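# --- Hypothetical usage sketch (not part of the generated module) ---
# Assumes azure-mgmt-resource is installed and that `credentials` and
# `subscription_id` are provided by the caller; TagsOperations is normally
# reached through ResourceManagementClient.tags rather than constructed
# directly.
#
#   from azure.mgmt.resource import ResourceManagementClient
#   client = ResourceManagementClient(credentials, subscription_id)
#   client.tags.create_or_update('environment')
#   client.tags.create_or_update_value('environment', 'staging')
#   for tag_details in client.tags.list():
#       print(tag_details.tag_name)
#   client.tags.delete_value('environment', 'staging')
#   client.tags.delete('environment')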
|
|
def impaint_mask(img, label_colors=None, init_mask=None, init_label=None):
r"""
CommandLine:
python -m plottool_ibeis.interact_impaint --test-impaint_mask
References:
http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.html
TODO: Slider for transparency
TODO: Label selector
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_impaint import * # NOQA
>>> import utool as ut
>>> import vtool_ibeis as vt
>>> img_fpath = ut.grab_test_imgpath('lena.png')
>>> img = vt.imread(img_fpath)
>>> label_colors = [255, 200, 100, 0]
>>> result = impaint_mask(img, label_colors)
>>> # verify results
>>> print(result)
"""
import cv2
import numpy as np
print('beginning impaint mask. c=circle, r=rect')
globals_ = dict(
drawing=False, # true if mouse is pressed
mode='rect', # 'rect' or 'circ'; press 'r'/'c' to switch shapes
color=255,
fgcolor=255,
bgcolor=0,
label_index=0,
radius=25,
transparency=.25,
ix=-1, iy=-1,
)
# mouse callback function
def draw_shape(x, y):
keys = ['mode', 'ix', 'iy', 'color', 'radius']
mode, ix, iy, color, radius = ut.dict_take(globals_, keys)
if mode == 'rect':
cv2.rectangle(mask, (ix, iy), (x, y), color, -1)
elif mode == 'circ':
cv2.circle(mask, (x, y), radius, color, -1)
def mouse_callback(event, x, y, flags, param):
#keys = ['drawing', 'mode', 'ix', 'iy', 'color']
#drawing, mode, ix, iy, color = ut.dict_take(globals_, keys)
if event in [cv2.EVENT_RBUTTONDOWN, cv2.EVENT_LBUTTONDOWN]:
globals_['drawing'] = True
globals_['ix'], globals_['iy'] = x, y
if event == cv2.EVENT_RBUTTONDOWN:
globals_['color'] = globals_['bgcolor']
elif event == cv2.EVENT_LBUTTONDOWN:
globals_['color'] = globals_['fgcolor']
elif event == cv2.EVENT_MOUSEMOVE:
if globals_['drawing'] is True:
draw_shape(x, y)
elif event in [cv2.EVENT_LBUTTONUP, cv2.EVENT_RBUTTONUP]:
globals_['drawing'] = False
draw_shape(x, y)
if event == cv2.EVENT_RBUTTONUP:
globals_['color'] = globals_['fgcolor']
elif event == cv2.EVENT_LBUTTONUP:
pass
#globals_['color'] = 255
if label_colors is None:
color_list = [255, 0]
else:
color_list = label_colors[:]
# Choose colors/labels to start with
if init_label is None:
init_color = 0
else:
init_color = color_list[init_label]
print('color_list = %r' % (color_list,))
print('init_color=%r' % (init_color,))
title = 'masking image'
if init_mask is not None:
try:
mask = init_mask[:, :, 0].copy()
except Exception:
mask = init_mask.copy()
else:
mask = np.zeros(img.shape[0:2], np.uint8) + init_color
transparent_mask = np.zeros(img.shape[0:2], np.float32)
cv2.namedWindow(title)
cv2.setMouseCallback(title, mouse_callback)
print('Valid Keys: r,c,t,l,q')
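# Key bindings, as wired up in the loop below: r = rectangle mode,
# c = circle mode, t = cycle transparency by 0.25, l = cycle label color,
# q or ESC = quit and return the mask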
while(1):
# Blend image and mask: weight = (mask / 255) * (1 - t) + t, so painted
# (255) pixels keep full brightness and unpainted (0) pixels dim to t
transparency = globals_['transparency']
# Scale mask from [0, 255] to [0, 1]
np.divide(mask, 255.0, out=transparent_mask)
# Leave room for a bit of transparency
np.multiply(transparent_mask, (1.0 - transparency), out=transparent_mask)
# Add the transparency floor
np.add(transparent_mask, transparency, out=transparent_mask)
# Multiply the image by the per-pixel transparency weights
masked_image = (img * transparent_mask[:, :, None]).astype(np.uint8)
cv2.imshow(title, masked_image)
keycode = cv2.waitKey(1) & 0xFF
if keycode == ord('r'):
globals_['mode'] = 'rect'
if keycode == ord('c'):
globals_['mode'] = 'circ'
if keycode == ord('t'):
globals_['transparency'] = (globals_['transparency'] + .25) % 1.0
if keycode == ord('l'):
globals_['label_index'] = (globals_['label_index'] + 1) % len(color_list)
globals_['fgcolor'] = color_list[globals_['label_index']]
print('fgcolor = %r' % (globals_['fgcolor'],))
if keycode == ord('q') or keycode == 27:
break
cv2.destroyAllWindows()
return mask
def cached_impaint(bgr_img, cached_mask_fpath=None, label_colors=None,
init_mask=None, aug=False, refine=False):
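r"""
Return an impainted mask, reading it from ``cached_mask_fpath`` when a
cached copy exists; otherwise (or when ``refine`` is True) paint one
interactively with :func:`impaint_mask` and write it to the cache path.
"""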
import vtool_ibeis as vt
if cached_mask_fpath is None:
cached_mask_fpath = 'image_' + ut.hashstr_arr(bgr_img) + '.png'
if aug:
cached_mask_fpath += '.' + ut.hashstr_arr(bgr_img)
if label_colors is not None:
cached_mask_fpath += ut.hashstr_arr(label_colors)
cached_mask_fpath += '.png'
#cached_mask_fpath = 'tmp_mask.png'
if refine or not ut.checkpath(cached_mask_fpath):
if refine and ut.checkpath(cached_mask_fpath):
if init_mask is None:
init_mask = vt.imread(cached_mask_fpath, grayscale=True)
custom_mask = impaint_mask(bgr_img, label_colors=label_colors, init_mask=init_mask)
vt.imwrite(cached_mask_fpath, custom_mask)
else:
custom_mask = vt.imread(cached_mask_fpath, grayscale=True)
return custom_mask
def demo():
r"""
CommandLine:
python -m plottool_ibeis.interact_impaint --test-demo
References:
http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.html
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_impaint import * # NOQA
>>> # build test data
>>> # execute function
>>> result = demo()
>>> # verify results
>>> print(result)
"""
import cv2
import numpy as np
globals_ = dict(
drawing=False, # true if mouse is pressed
mode=False, # if True, draw rectangle. Press 'm' to toggle to curve
ix=-1, iy=-1,
)
# mouse callback function
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
globals_['drawing'] = True
globals_['ix'], globals_['iy'] = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if globals_['drawing'] is True:
if globals_['mode'] is True:
cv2.rectangle(img, (globals_['ix'], globals_['iy']), (x, y), (0, 255, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
elif event == cv2.EVENT_LBUTTONUP:
globals_['drawing'] = False
if globals_['mode'] is True:
cv2.rectangle(img, (globals_['ix'], globals_['iy']), (x, y), (0, 255, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while(1):
cv2.imshow('image', img)
keycode = cv2.waitKey(1) & 0xFF
if keycode == ord('m'):
globals_['mode'] = not globals_['mode']
elif keycode == 27:
break
cv2.destroyAllWindows()
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import logging
import os
from pkg_resources import DefaultProvider, ZipProvider, get_provider
from .common import Chroot, chmod_plus_x, open_zip, safe_mkdir, safe_mkdtemp
from .compatibility import to_bytes
from .compiler import Compiler
from .finders import get_entry_point_from_console_script, get_script_from_distributions
from .interpreter import PythonInterpreter
from .pex_info import PexInfo
from .util import CacheHelper, DistributionHelper
BOOTSTRAP_ENVIRONMENT = b"""
import os
import sys
__entry_point__ = None
if '__file__' in locals() and __file__ is not None:
__entry_point__ = os.path.dirname(__file__)
elif '__loader__' in locals():
from zipimport import zipimporter
from pkgutil import ImpLoader
if hasattr(__loader__, 'archive'):
__entry_point__ = __loader__.archive
elif isinstance(__loader__, ImpLoader):
__entry_point__ = os.path.dirname(__loader__.get_filename())
if __entry_point__ is None:
sys.stderr.write('Could not launch python executable!\\n')
sys.exit(2)
sys.path[0] = os.path.abspath(sys.path[0])
sys.path.insert(0, os.path.abspath(os.path.join(__entry_point__, '.bootstrap')))
from _pex.pex_bootstrapper import bootstrap_pex
bootstrap_pex(__entry_point__)
"""
class PEXBuilder(object):
"""Helper for building PEX environments."""
class Error(Exception): pass
class ImmutablePEX(Error): pass
class InvalidDistribution(Error): pass
class InvalidDependency(Error): pass
class InvalidExecutableSpecification(Error): pass
BOOTSTRAP_DIR = ".bootstrap"
def __init__(self, path=None, interpreter=None, chroot=None, pex_info=None, preamble=None,
copy=False):
"""Initialize a pex builder.
:keyword path: The path to write the PEX as it is built. If ``None`` is specified,
a temporary directory will be created.
:keyword interpreter: The interpreter to use to build this PEX environment. If ``None``
is specified, the current interpreter is used.
:keyword chroot: If specified, preexisting :class:`Chroot` to use for building the PEX.
:keyword pex_info: A preexisting PexInfo to use to build the PEX.
:keyword preamble: If supplied, execute this code prior to bootstrapping this PEX
environment.
:type preamble: str
:keyword copy: If False, attempt to create the pex environment via hard-linking, falling
back to copying across devices. If True, always copy.
.. versionchanged:: 0.8
The temporary directory created when ``path`` is not specified is now garbage collected on
interpreter exit.
"""
self._chroot = chroot or Chroot(path or safe_mkdtemp())
self._pex_info = pex_info or PexInfo.default()
self._frozen = False
self._interpreter = interpreter or PythonInterpreter.get()
self._shebang = self._interpreter.identity.hashbang()
self._logger = logging.getLogger(__name__)
self._preamble = to_bytes(preamble or '')
self._copy = copy
self._distributions = set()
def _ensure_unfrozen(self, name='Operation'):
if self._frozen:
raise self.ImmutablePEX('%s is not allowed on a frozen PEX!' % name)
@property
def interpreter(self):
return self._interpreter
def chroot(self):
return self._chroot
def clone(self, into=None):
"""Clone this PEX environment into a new PEXBuilder.
:keyword into: (optional) An optional destination directory to clone this PEXBuilder into. If
not specified, a temporary directory will be created.
Clones PEXBuilder into a new location. This is useful if the PEXBuilder has been frozen and
rendered immutable.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.
"""
chroot_clone = self._chroot.clone(into=into)
clone = self.__class__(
chroot=chroot_clone,
interpreter=self._interpreter,
pex_info=self._pex_info.copy())
for dist in self._distributions:
clone.add_distribution(dist)
return clone
def path(self):
return self.chroot().path()
@property
def info(self):
return self._pex_info
@info.setter
def info(self, value):
if not isinstance(value, PexInfo):
raise TypeError('PEXBuilder.info must be a PexInfo!')
self._ensure_unfrozen('Changing PexInfo')
self._pex_info = value
def add_source(self, filename, env_filename):
"""Add a source to the PEX environment.
:param filename: The source filename to add to the PEX.
:param env_filename: The destination filename in the PEX. This path
must be a relative path.
"""
self._ensure_unfrozen('Adding source')
self._copy_or_link(filename, env_filename, "source")
def add_resource(self, filename, env_filename):
"""Add a resource to the PEX environment.
:param filename: The source filename to add to the PEX.
:param env_filename: The destination filename in the PEX. This path
must be a relative path.
"""
self._ensure_unfrozen('Adding a resource')
self._copy_or_link(filename, env_filename, "resource")
def add_requirement(self, req):
"""Add a requirement to the PEX environment.
:param req: A requirement that should be resolved in this environment.
.. versionchanged:: 0.8
Removed ``dynamic`` and ``repo`` keyword arguments as they were unused.
"""
self._ensure_unfrozen('Adding a requirement')
self._pex_info.add_requirement(req)
def set_executable(self, filename, env_filename=None):
"""Set the executable for this environment.
:param filename: The file that should be executed within the PEX environment when the PEX is
invoked.
:keyword env_filename: (optional) The name that the executable file should be stored as within
the PEX. By default this will be the base name of the given filename.
The entry point of the PEX may also be specified via ``PEXBuilder.set_entry_point``.
"""
self._ensure_unfrozen('Setting the executable')
if self._pex_info.script:
raise self.InvalidExecutableSpecification('Cannot set both entry point and script of PEX!')
if env_filename is None:
env_filename = os.path.basename(filename)
if self._chroot.get("executable"):
raise self.InvalidExecutableSpecification(
"Setting executable on a PEXBuilder that already has one!")
self._copy_or_link(filename, env_filename, "executable")
entry_point = env_filename
# str.replace returns a new string, so rebind the result
entry_point = entry_point.replace(os.path.sep, '.')
self._pex_info.entry_point = entry_point.rpartition('.')[0]
def set_script(self, script):
"""Set the entry point of this PEX environment based upon a distribution script.
:param script: The script name as defined either by a console script or ordinary
script within the setup.py of one of the distributions added to the PEX.
:raises: :class:`PEXBuilder.InvalidExecutableSpecification` if the script is not found
in any distribution added to the PEX.
"""
# check if 'script' is a console_script
entry_point = get_entry_point_from_console_script(script, self._distributions)
if entry_point:
self.set_entry_point(entry_point)
return
# check if 'script' is an ordinary script
script_path, _, _ = get_script_from_distributions(script, self._distributions)
if script_path:
if self._pex_info.entry_point:
raise self.InvalidExecutableSpecification('Cannot set both entry point and script of PEX!')
self._pex_info.script = script
return
raise self.InvalidExecutableSpecification(
'Could not find script %r in any distribution %s within PEX!' % (
script, ', '.join(str(d) for d in self._distributions)))
def set_entry_point(self, entry_point):
"""Set the entry point of this PEX environment.
:param entry_point: The entry point of the PEX in the form of ``module`` or ``module:symbol``,
or ``None``.
:type entry_point: string or None
By default the entry point is None. The behavior of a ``None`` entry point is dropping into
an interpreter. If ``module``, it will be executed via ``runpy.run_module``. If
``module:symbol``, it is equivalent to ``from module import symbol; symbol()``.
The entry point may also be specified via ``PEXBuilder.set_executable``.
"""
self._ensure_unfrozen('Setting an entry point')
self._pex_info.entry_point = entry_point
def set_shebang(self, shebang):
"""Set the exact shebang line for the PEX file.
For example, pex_builder.set_shebang('/home/wickman/Local/bin/python3.4'). This is
used to override the default behavior which is to have a #!/usr/bin/env line referencing an
interpreter compatible with the one used to build the PEX.
:param shebang: The shebang line minus the #!.
:type shebang: str
"""
self._shebang = '#!%s' % shebang
def _add_dist_dir(self, path, dist_name):
for root, _, files in os.walk(path):
for f in files:
filename = os.path.join(root, f)
relpath = os.path.relpath(filename, path)
target = os.path.join(self._pex_info.internal_cache, dist_name, relpath)
self._copy_or_link(filename, target)
return CacheHelper.dir_hash(path)
def _add_dist_zip(self, path, dist_name):
with open_zip(path) as zf:
for name in zf.namelist():
if name.endswith('/'):
continue
target = os.path.join(self._pex_info.internal_cache, dist_name, name)
self._chroot.write(zf.read(name), target)
return CacheHelper.zip_hash(zf)
def _prepare_code_hash(self):
self._pex_info.code_hash = CacheHelper.pex_hash(self._chroot.path())
def add_distribution(self, dist, dist_name=None):
"""Add a :class:`pkg_resources.Distribution` from its handle.
:param dist: The distribution to add to this environment.
:keyword dist_name: (optional) The name of the distribution e.g. 'Flask-0.10.0'. By default
this will be inferred from the distribution itself should it be formatted in a standard way.
:type dist: :class:`pkg_resources.Distribution`
"""
self._ensure_unfrozen('Adding a distribution')
dist_name = dist_name or os.path.basename(dist.location)
self._distributions.add(dist)
if os.path.isdir(dist.location):
dist_hash = self._add_dist_dir(dist.location, dist_name)
else:
dist_hash = self._add_dist_zip(dist.location, dist_name)
# add dependency key so that it can rapidly be retrieved from cache
self._pex_info.add_distribution(dist_name, dist_hash)
def add_dist_location(self, dist, name=None):
"""Add a distribution by its location on disk.
:param dist: The path to the distribution to add.
:keyword name: (optional) The name of the distribution, should the dist directory alone be
ambiguous. Packages contained within site-packages directories may require specifying
``name``.
:raises PEXBuilder.InvalidDistribution: When the path does not contain a matching distribution.
PEX supports packed and unpacked .whl and .egg distributions, as well as any distribution
supported by setuptools/pkg_resources.
"""
self._ensure_unfrozen('Adding a distribution')
bdist = DistributionHelper.distribution_from_path(dist)
if bdist is None:
raise self.InvalidDistribution('Could not find distribution at %s' % dist)
self.add_distribution(bdist)
self.add_requirement(bdist.as_requirement())
def add_egg(self, egg):
"""Alias for add_dist_location."""
self._ensure_unfrozen('Adding an egg')
return self.add_dist_location(egg)
# TODO(wickman) Consider changing this behavior to put the onus on the consumer
# of pex to write the pex sources correctly.
def _prepare_inits(self):
relative_digest = self._chroot.get("source")
init_digest = set()
for path in relative_digest:
split_path = path.split(os.path.sep)
for k in range(1, len(split_path)):
sub_path = os.path.sep.join(split_path[0:k] + ['__init__.py'])
if sub_path not in relative_digest and sub_path not in init_digest:
import_string = "__import__('pkg_resources').declare_namespace(__name__)"
try:
self._chroot.write(import_string, sub_path)
except TypeError:
# Python 3
self._chroot.write(bytes(import_string, 'UTF-8'), sub_path)
init_digest.add(sub_path)
def _precompile_source(self):
source_relpaths = [path for label in ('source', 'executable', 'main', 'bootstrap')
for path in self._chroot.filesets.get(label, ()) if path.endswith('.py')]
compiler = Compiler(self.interpreter)
compiled_relpaths = compiler.compile(self._chroot.path(), source_relpaths)
for compiled in compiled_relpaths:
self._chroot.touch(compiled, label='bytecode')
def _prepare_manifest(self):
self._chroot.write(self._pex_info.dump().encode('utf-8'), PexInfo.PATH, label='manifest')
def _prepare_main(self):
self._chroot.write(self._preamble + b'\n' + BOOTSTRAP_ENVIRONMENT,
'__main__.py', label='main')
def _copy_or_link(self, src, dst, label=None):
if self._copy:
self._chroot.copy(src, dst, label)
else:
self._chroot.link(src, dst, label)
# TODO(wickman) Ideally we unqualify our setuptools dependency and inherit whatever is
# bundled into the environment so long as it is compatible (and error out if not.)
#
# As it stands, we're picking and choosing the pieces we think we need, which means
# if there are bits of setuptools imported from elsewhere they may be incompatible with
# this.
def _prepare_bootstrap(self):
# Writes enough of setuptools into the .pex .bootstrap directory so that we can be fully
# self-contained.
wrote_setuptools = False
setuptools = DistributionHelper.distribution_from_path(
self._interpreter.get_location('setuptools'),
name='setuptools')
if setuptools is None:
raise RuntimeError('Failed to find setuptools while building pex!')
for fn, content_stream in DistributionHelper.walk_data(setuptools):
if fn.startswith('pkg_resources') or fn.startswith('_markerlib'):
if not fn.endswith('.pyc'): # We'll compile our own .pyc's later.
dst = os.path.join(self.BOOTSTRAP_DIR, fn)
self._chroot.write(content_stream.read(), dst, 'bootstrap')
wrote_setuptools = True
if not wrote_setuptools:
raise RuntimeError(
'Failed to extract pkg_resources from setuptools. Perhaps pants was linked with an '
'incompatible setuptools.')
libraries = {
'pex': '_pex',
}
for source_name, target_location in libraries.items():
provider = get_provider(source_name)
if not isinstance(provider, DefaultProvider):
mod = __import__(source_name, fromlist=['ignore'])
provider = ZipProvider(mod)
for fn in provider.resource_listdir(''):
if fn.endswith('.py'):
self._chroot.write(provider.get_resource_string(source_name, fn),
os.path.join(self.BOOTSTRAP_DIR, target_location, fn), 'bootstrap')
def freeze(self, bytecode_compile=True):
"""Freeze the PEX.
:param bytecode_compile: If True, precompile .py files into .pyc files when freezing code.
Freezing the PEX writes all the necessary metadata and environment bootstrapping code. It may
only be called once and renders the PEXBuilder immutable.
"""
self._ensure_unfrozen('Freezing the environment')
self._prepare_inits()
self._prepare_code_hash()
self._prepare_manifest()
self._prepare_bootstrap()
self._prepare_main()
if bytecode_compile:
self._precompile_source()
self._frozen = True
def build(self, filename, bytecode_compile=True):
"""Package the PEX into a zipfile.
:param filename: The filename where the PEX should be stored.
:param bytecode_compile: If True, precompile .py files into .pyc files.
If the PEXBuilder is not yet frozen, it will be frozen by ``build``. This renders the
PEXBuilder immutable.
"""
if not self._frozen:
self.freeze(bytecode_compile=bytecode_compile)
try:
os.unlink(filename + '~')
self._logger.warn('Previous binary unexpectedly exists, cleaning: %s' % (filename + '~'))
except OSError:
# The expectation is that the file does not exist, so continue
pass
if os.path.dirname(filename):
safe_mkdir(os.path.dirname(filename))
with open(filename + '~', 'ab') as pexfile:
assert os.path.getsize(pexfile.name) == 0
pexfile.write(to_bytes('%s\n' % self._shebang))
self._chroot.zip(filename + '~', mode='a')
if os.path.exists(filename):
os.unlink(filename)
os.rename(filename + '~', filename)
chmod_plus_x(filename)
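# --- Hypothetical usage sketch (paths and names are illustrative) ---
#   builder = PEXBuilder()
#   builder.add_source('src/app/main.py', 'app/main.py')
#   builder.set_entry_point('app.main:main')  # or set_executable()/set_script()
#   builder.build('dist/app.pex')  # freezes, zips, and chmod +x's the result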
|
|
""" Module for IGM calculations
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
from IPython import embed
from pkg_resources import resource_filename
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units
from astropy.table import Table
from astropy.utils import isiterable
from astropy.cosmology import Planck15
from astropy import constants
from frb import halos
from frb import mw
def fukugita04_dict():
"""
Data from Fukugita 2004, Table 1
Returns:
f04_dict (dict): data dict.
"""
f04_dict = {}
f04_dict['M_sphere'] = 0.0015
f04_dict['M_disk'] = 0.00055
f04_dict['M_HI'] = 0.00062
f04_dict['M_H2'] = 0.00016
f04_dict['M_WD'] = 0.00036
f04_dict['M_NS'] = 0.00005
f04_dict['M_BH'] = 0.00007
f04_dict['M_BD'] = 0.00014
# Return
return f04_dict
def average_fHI(z, z_reion=7.):
"""
Average HI fraction
1 = neutral
0 = fully ionized
Args:
z (float or ndarray): redshift
z_reion (float, optional): redshift
of reionization.
Returns:
fHI (float or ndarray): float or ndarray
"""
z, flg_z = z_to_array(z)
fHI = np.zeros_like(z)
#
zion = z > z_reion
fHI[zion] = 1.
# Return
if flg_z:
return fHI
else:
return fHI[0]
def average_He_nume(z, z_HIreion=7.):
"""
Average number of electrons contributed by He as a function of redshift
per He nucleus
Following Kulkarni, Worseck & Hennawi 2018
https://arxiv.org/abs/1807.09774
Args:
z (float or ndarray): Redshift
z_HIreion (float, optional): Helium reionization
redshift.
Returns:
neHe (float or ndarray): Number of free electrons
per Helium nucleus.
"""
z, flg_z = z_to_array(z)
# Load Kulkarni Table
He_file = resource_filename('frb', os.path.join('data','IGM','qheIII.txt'))
qHeIII = Table.read(He_file, format='ascii')
# Fully re-ionized
first_ionized = np.where(qHeIII['Q_HeIII_18'] >= 1.)[0][0]
z_HeIIreion = qHeIII['z'][first_ionized]
#
fHeI = np.zeros_like(z)
fHeII = np.zeros_like(z)
# HeI ionized at HI reionization
zion = z > z_HIreion
fHeI[zion] = 1.
# HeII ionized at HeII reionization
zion2 = (z > z_HeIIreion) & (z < z_HIreion)
fi_HeIII = interp1d(qHeIII['z'], qHeIII['Q_HeIII_18'])
fHeII[zion2] = 1. - fi_HeIII(z[zion2])
# Combine
neHe = (1.-fHeI) + (1.-fHeII) # No 2 on the second term as the first one gives you the first electron
# Return
if flg_z:
return neHe
else:
return neHe[0]
def z_from_DM(DM, cosmo=Planck15, coord=None, corr_nuisance=True):
"""
Report back an estimated redshift from an input IGM DM
Any contributions from the Galaxy and/or host need to have been 'removed'
Args:
DM (Quantity): Dispersion measure.
cosmo (Cosmology, optional): Cosmology
of the universe. LambdaCDM with Planck15 parameters
used by default.
coord (SkyCoord, optional): If provided, use it to remove the ISM
corr_nuisance (bool, optional): If True, correct for the MW Halo
and the host with 100 DM units.
Returns:
z (float): Redshift
"""
if coord is not None:
DM_ISM = mw.ismDM(coord)
DM_use = DM - DM_ISM
else:
DM_use = DM
# Correct
if corr_nuisance:
DM_use -= 100 * units.pc/units.cm**3
# Calculate DMs
all_DM, zeval = average_DM(20., cosmo=cosmo, neval=20000, cumul=True)
# Interpolate
fint = interp1d(all_DM.value, zeval)
# Evaluate
z = fint(DM_use.to('pc/cm**3').value)
# Return
return z
def f_diffuse(z, cosmo=Planck15, return_rho = False):
"""
Calculate the cosmic fraction of baryons
in diffuse gas phase based on our empirical
knowledge of baryon distributions and their
ionization state.
Args:
z (float or ndarray): Redshift
cosmo (Cosmology, optional): Cosmology of
the universe.
return_rho (bool, optional): If true,
the diffuse gas density
is returned too.
Returns:
f_diffuse (float, ndarray): Diffuse gas baryon fraction.
rho_diffuse (Quantity): Physical diffuse gas density.
Returned if return_rho is set to true.
"""
# Get comoving baryon mass density
rho_b = cosmo.Ob0 * cosmo.critical_density0.to('Msun/Mpc**3')
# Dense components
rho_Mstar = avg_rhoMstar(z, remnants=True)
rho_ISM = avg_rhoISM(z, cosmo=cosmo)
# Diffuse gas fraction
f_diffuse = 1 - ((rho_Mstar+rho_ISM)/rho_b).value
if not return_rho:
return f_diffuse
else:
return f_diffuse, rho_b*f_diffuse*(1+z)**3
def ne_cosmic(z, cosmo = Planck15, mu = 4./3):
"""
Calculate the average cosmic electron
number density as a function of redshift.
Args:
z (float or ndarray): Redshift
cosmo (Cosmology, optional): Cosmology in
which the calculations are to be performed.
mu (float, optional): Mean baryon mass per hydrogen nucleus in units
of the proton mass (4/3 accounts for a ~25% He mass fraction).
Returns:
ne_cosmic (Quantity): Average physical number
density of electrons in the universe in cm^-3.
"""
# Get diffuse gas density
_, rho_diffuse = f_diffuse(z, cosmo=cosmo, return_rho=True)
# Number densities of H and He
n_H = (rho_diffuse/constants.m_p/mu).to('cm**-3')
n_He = n_H / 12. # 25% He mass fraction
# Compute electron number density
ne_cosmic = n_H * (1.-average_fHI(z)) + n_He*(average_He_nume(z))
return ne_cosmic
def average_DM(z, cosmo = Planck15, cumul=False, neval=10000, mu=4/3):
"""
Calculate the average cosmic DM 'expected' based on our empirical
knowledge of baryon distributions and their ionization state.
This includes both the IGM and galactic halos, i.e. any and all diffuse gas
Args:
z (float): Redshift
cosmo (Cosmology, optional): Cosmology used for the calculation.
mu (float, optional): Mean baryon mass per hydrogen nucleus in units
of the proton mass, used when calculating n_H
neval (int, optional): Number of redshift evaluation points.
cumul (bool, optional): Return the DM as a function of z
Returns:
DM (Quantity or Quantity array): DM values evaluated at
the required redshifts. An array is returned only if
cumul is True.
zeval (ndarray): evaluation redshifts. Only returned if
cumul is True.
"""
# Init
zeval, dz = np.linspace(0., z, neval, retstep=True)
# Get n_e as a function of z, forwarding the chosen cosmology and mu
n_e = ne_cosmic(zeval, cosmo=cosmo, mu=mu)
# DM = c * int n_e dz / ((1+z)**2 H(z)); one (1+z) converts dz to proper
# path length and the other is the observed-frame redshift factor for DM
denom = cosmo.H(zeval) * (1+zeval) * (1+zeval)
# Time to Sum
DM_cum = (constants.c * np.cumsum(n_e * dz / denom)).to('pc/cm**3')
# Return
if cumul:
return DM_cum, zeval
else:
return DM_cum[-1]
def average_DMhalos(z, cosmo = Planck15, f_hot = 0.75, rmax=1., logMmin=10.3, neval = 10000, cumul=False):
"""
Average DM_halos term from halos along the sightline to an FRB
Args:
z (float): Redshift of the FRB
cosmo (Cosmology): Cosmology in which the calculations
are to be performed.
f_hot (float, optional): Fraction of the halo baryons in diffuse phase.
rmax (float, optional): Size of a halo in units of r200
logMmin (float, optional): Lowest mass halos to consider
Cannot be much below 10.3 or the Halo code barfs
The code deals with h^-1 factors, i.e. do not impose it yourself
neval (int, optional): Number of redshift values between
0 and z the function is evaluated at.
cumul (bool, optional): Return a cumulative evaluation?
Returns:
DM_halos (Quantity or Quantity array): One value if cumul=False
else evaluated at a series of z
zeval (ndarray): Evaluation redshifts if cumul=True
"""
zeval, dz = np.linspace(0, z, neval, retstep = True)
# Electron number density in the universe
ne_tot = ne_cosmic(zeval, cosmo = cosmo)
# Diffuse gas mass fraction
f_diff = f_diffuse(zeval, cosmo = cosmo)
# Fraction of total mass in halos
zvals = np.linspace(0, z, 20)
fhalos = halos.frac_in_halos(zvals, Mlow = 10**logMmin, Mhigh = 1e16, rmax = rmax)
fhalos_interp = IUS(zvals, fhalos)(zeval)
# Electron number density in halos only
ne_halos = ne_tot*fhalos_interp*f_hot/f_diff
# DM = c * int n_e dz / ((1+z)**2 H(z)), as in average_DM above
denom = cosmo.H(zeval) * (1+zeval) * (1+zeval)
# DM halos
DM_halos = (constants.c * np.cumsum(ne_halos * dz / denom)).to('pc/cm**3')
# Return
if cumul:
return DM_halos, zeval
else:
return DM_halos[-1]
def average_DMIGM(z, cosmo = Planck15, f_hot = 0.75, rmax=1., logMmin=10.3, neval = 10000, cumul=False):
"""
Estimate DM_IGM in a cumulative fashion
Args:
z (float): Redshift of the FRB
cosmo (Cosmology, optional): Cosmology in which
the calculations are to be performed. LambdaCDM
with Planck15 parameters assumed by default.
f_hot (float, optional): Fraction of the halo
baryons in diffuse phase.
rmax (float, optional):
Size of a halo in units of r200
logMmin (float, optional):
Lowest mass halos to consider. Cannot be much below
10.3 or the Halo code barfs. The code deals with
h^-1 factors, i.e. do not impose it yourself
neval (int, optional): Number of redshift values between
0 and z the function is evaluated at.
cumul (bool, optional):
Return a cumulative evaluation?
Returns:
DM (Quantity or Quantity array): One value if cumul=False
else evaluated at a series of z
zeval (ndarray, optional): Evaluation redshifts if cumul=True
"""
# DM cosmic
DM_cosmic, zeval = average_DM(z, cosmo = cosmo, cumul=True, neval=neval)
# DM_halos
DM_halos, _ = average_DMhalos(z,cosmo = cosmo, logMmin = logMmin,
f_hot=f_hot, cumul = True, rmax = rmax, neval = neval)
# Subtract the two
DM_IGM = DM_cosmic - DM_halos
# Return
if cumul:
return DM_IGM, zeval
else:
return DM_IGM[-1]
def avg_rhoISM(z, cosmo=Planck15):
"""
Co-moving Mass density of the ISM
Interpolates from z=0 values to z=1 where
we assume M_ISM = M* and also for z>1
Args:
z (float or ndarray): Redshift
cosmo (Cosmology, optional): Cosmology in which
the calculations are to be performed. LambdaCDM
with Planck15 parameters assumed by default.
Returns:
rhoISM (Quantity): Units of Msun/Mpc^3
"""
# Init
z, flg_z = z_to_array(z)
# Mstar
rhoMstar = avg_rhoMstar(z, remnants=False)
# z=0 (Fukugita+ 2004)
f04_dict = fukugita04_dict()
M_ISM = f04_dict['M_HI'] + f04_dict['M_H2']
f_ISM_0 = M_ISM/(f04_dict['M_sphere']+f04_dict['M_disk'])
# Assume M_ISM = M* at z=1
f_ISM_1 = 1.
# Ages
t0 = cosmo.age(0.).to('Gyr').value
t1 = cosmo.age(1.).to('Gyr').value
t1_2 = (t0+t1)/2.
tval = cosmo.age(z).to('Gyr').value
# Interpolate
f_ISM = interp1d([t0, t1_2, t1], [f_ISM_0, 0.58, f_ISM_1], kind='quadratic',
bounds_error=False, fill_value=1.)
# Calculate
rhoISM_unitless = f_ISM(tval) * rhoMstar.value
# Finish
rhoISM = rhoISM_unitless * units.Msun / units.Mpc**3
#
return rhoISM
def avg_rhoMstar(z, remnants=True):
"""
Return mass density in stars as calculated by
Madau & Dickinson (2014)
Args:
z (float or ndarray): Redshift
remnants (bool, optional): Include remnants in the calculation?
Returns:
rho_Mstar (Quantity): Units of Msun/Mpc^3
"""
# Init
z, flg_z = z_to_array(z)
# Load
stellar_mass_file = resource_filename('frb', os.path.join('data','IGM','stellarmass.dat'))
rho_mstar_tbl = Table.read(stellar_mass_file, format='ascii')
# Output
rho_Mstar_unitless = np.zeros_like(z)
# Extrema
highz = z > rho_mstar_tbl['z'][-1]
rho_Mstar_unitless[highz] = rho_mstar_tbl['rho_Mstar'][-1]
# Interpolate
fint = interp1d(rho_mstar_tbl['z'], rho_mstar_tbl['rho_Mstar'], kind='cubic')
rho_Mstar_unitless[~highz] = fint(z[~highz])
# Finish
rho_Mstar = rho_Mstar_unitless * units.Msun / units.Mpc**3
# Remnants
if remnants:
# Fukugita 2004 (Table 1)
f04_dict = fukugita04_dict()
f_remnants = (f04_dict['M_WD'] + f04_dict['M_NS'] + f04_dict['M_BH'] + f04_dict['M_BD']) / (
f04_dict['M_sphere'] + f04_dict['M_disk'])
# Apply
rho_Mstar *= (1+f_remnants)
# Return
if flg_z:
return rho_Mstar
else:
return rho_Mstar[0]
def avg_rhoSFR(z):
"""
Average SFR density
Based on Madau & Dickinson (2014)
Parameters
----------
z: float or ndarray
Redshift
Returns
-------
SFR: Quantity
Units of Msun/yr/Mpc^3
"""
rho_SFR_unitless = 0.015 * (1+z)**2.7 / (1 + ((1+z)/2.9)**5.6)
rho_SFR = rho_SFR_unitless * units.Msun / units.yr / units.Mpc**3
# Return
return rho_SFR
def z_to_array(z):
"""
Convert input scalar or array to an array
Parameters
----------
z: float or ndarray
Redshift
Returns
-------
z: ndarray
flg_z: int
0 -- Input was a scalar
1 -- Input was an array
"""
# float or ndarray?
if not isiterable(z):
z = np.array([z])
flg_z = 0
else:
flg_z = 1
# Return
return z, flg_z
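# --- Hypothetical usage sketch (requires the frb package and its data files) ---
#   from astropy import units
#   DM_mean = average_DM(0.5)                        # mean cosmic DM to z=0.5
#   DM_vs_z, zeval = average_DM(0.5, cumul=True)     # cumulative build-up
#   DM_halos = average_DMhalos(0.5)                  # halo contribution only
#   z_est = z_from_DM(500 * units.pc / units.cm**3)  # invert DM -> z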
|
|
"""Tools for manipulating of large commutative expressions. """
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import iterable, is_sequence, SYMPY_INTS, range
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic, preorder_traversal
from sympy.core.expr import Expr
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer, Number, I
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple, Dict
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
variations, ordered)
from collections import defaultdict
def _isnumber(i):
return isinstance(i, (SYMPY_INTS, float)) or i.is_Number
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
This is strictly only valid if the exponent from which
the integer is extracted is itself an integer or the
base is positive. These conditions are assumed and not
checked here.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ['factors', 'gens']
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset([2, x])
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors is None or factors is S.One:
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left unevaluated.
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
if i:
factors[I] = S.One*i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = []
for k in factors:
if k is I or k in (-1, 1):
handle.append(k)
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
if S.NegativeOne not in factors:
factors[S.NegativeOne] = S.Zero
factors[S.NegativeOne] += a.exp
elif a == 1:
factors[a] = S.One
elif a == -1:
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
try:
self.gens = frozenset(factors.keys())
except AttributeError:
raise TypeError('expecting Expr or dictionary')
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
b, e = factor.as_base_exp()
if isinstance(exp, int):
e = _keep_coeff(Integer(exp), e)
elif isinstance(exp, Rational):
e = _keep_coeff(exp, e)
else:
e *= exp
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
The only differences between this and method ``div`` is that this
is 1) optimized for the case when there are few factors in common and
2) this does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
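# Illustrative example of normal (x, y are sympy symbols):
#   >>> from sympy.abc import x, y
#   >>> Factors(x*y**2).normal(Factors(x*y))
#   (Factors({y: 1}), Factors({}))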
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-bases powers:
>>> from sympy.core.exprtools import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt == True:
factors[factor] = exp
elif lt == False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __div__(self, other): # Factors
return self.quo(other)
__truediv__ = __div__
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self.__eq__(other)
class Term(object):
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None): # Term
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self): # Term
return hash((self.coeff, self.numer, self.denom))
def __repr__(self): # Term
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self): # Term
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other): # Term
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self): # Term
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other): # Term
return self.mul(other.inv())
def pow(self, other): # Term
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other): # Term
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other): # Term
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other): # Term
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other): # Term
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other): # Term
if isinstance(other, SYMPY_INTS):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other): # Term
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other): # Term
return not self.__eq__(other)
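# Illustrative example of Term (x, y are sympy symbols; repr is indicative):
#   >>> from sympy.abc import x, y
#   >>> Term(3*x**2/y)
#   Term(3, Factors({x: 2}), Factors({y: 1}))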
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
content has already been extracted.
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, [t for t in terms if t]))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return S.Zero, S.Zero, S.One
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(S(1)).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
``terms`` can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
    expression. When True (default), all numerical denominators will be cleared;
when False the denominators will be cleared only if all terms had numerical
denominators other than 1.
``fraction``, when True (default), will put the expression over a common
denominator.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, fraction=False, clear=False)
(x + 2/x)/(2*y)
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, sympy.polys.polytools.terms_gcd
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
if isinstance(a, Basic):
return a.func(*[handle(i) for i in a.args])
return type(a)([handle(i) for i in a])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> from sympy import factor_terms, Symbol
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
This only applies when there is a single Add that the coefficient
multiplies:
>>> factor_terms(x*y/2 + y, clear=True)
y*(x + 2)/2
>>> factor_terms(x*y/2 + y, clear=False) == _
True
    If a -1 is all that can be factored out, set the flag ``sign`` to
    False to keep it from being factored out:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, sympy.polys.polytools.terms_gcd
"""
def do(expr):
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([do(i) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([do(i) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if all(a.as_coeff_Mul()[0] < 0 for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
b, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
    ``name``, if given, is the name that will be used with numbered Dummy
variables that will replace the non-commutative objects and is mainly
used for doctesting purposes.
Notes
=====
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import symbols, Mul
>>> from sympy.core.exprtools import _mask_nc
>>> from sympy.abc import x, y
>>> A, B, C = symbols('A,B,C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(_d0**2 - x**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, None, [A, B])
An nc-object with nc-symbols but no others outside of it:
>>> _mask_nc(1 + x*Commutator(A, B), 'd')
(_d0*x + 1, {_d0: Commutator(A, B)}, [])
>>> _mask_nc(NO(Fd(x)*F(y)), 'd')
(_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
Multiple nc-objects:
>>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
>>> _mask_nc(eq, 'd')
(x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
Multiple nc-objects and nc-symbols:
>>> eq = A*Commutator(A, B) + B*Commutator(A, C)
>>> _mask_nc(eq, 'd')
(A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
If there is an object that:
- doesn't contain nc-symbols
- but has arguments which derive from Basic, not Expr
- and doesn't define an _eval_is_commutative routine
    then the ``is_commutative`` test will give False (or None). Such
objects are also removed by this routine:
>>> from sympy import Basic
>>> eq = (1 + Mul(Basic(), Basic(), evaluate=False))
>>> eq.is_commutative
False
>>> _mask_nc(eq, 'd')
(_d0**2 + 1, {_d0: Basic()}, [])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from sympy import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_Symbol:
nc_syms.add(a)
elif not (a.is_Add or a.is_Mul or a.is_Pow):
if all(s.is_commutative for s in a.free_symbols):
rep.append((a, Dummy()))
else:
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol or object, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_obj) == 1 and not nc_syms:
rep.append((nc_obj.pop(), Dummy()))
elif len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, dict([(v, k) for k, v in rep]) or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
    Examples
    ========
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import powsimp
from sympy.polys import gcd, factor
def _pemexpand(expr):
"Expand with the minimal set of hints necessary to check the result."
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
if g is not S.One:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
cc[0] = cc[0]/c
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
    # a factor of -1, e.g. A**2 - B**2 -- {A: y, B: x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
|
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
    # A simple implementation of the formulas from
    # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    # Large-Dimensional Covariance Matrices".
    # beta and delta are given at the beginning of section 3.2.
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=False)
mu = np.trace(emp_cov) / n_features
delta_ = emp_cov.copy()
delta_.flat[::n_features + 1] -= mu
delta = (delta_ ** 2).sum() / n_features
X2 = X ** 2
beta_ = 1. / (n_features * n_samples) \
* np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
beta = min(beta_, delta)
shrinkage = beta / delta
return shrinkage
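# For reference (a sketch, not used by the tests): the Ledoit-Wolf shrunk
# estimate itself is the convex combination
#   shrunk_cov = shrinkage * mu * np.eye(n_features) + (1 - shrinkage) * emp_cov
# with mu = np.trace(emp_cov) / n_features as computed in the helper above.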
def test_ledoit_wolf_small():
# Compare our blocked implementation to the naive implementation
X_small = X[:, :4]
lw = LedoitWolf()
lw.fit(X_small)
shrinkage_ = lw.shrinkage_
assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
|
|
import sys
import string
from collections.abc import Sequence, Mapping
class SQLText:
    __slots__ = ('_segments',)
def __init__(self):
self._segments = []
def __bool__(self):
return bool(self._segments)
def __iadd__(self, sqltext):
if isinstance(sqltext, SQLText):
self._segments += sqltext._segments
return self
elif isinstance(sqltext, str):
return self._join(sqltext, sep='', vars=sys._getframe(1).f_locals)
else:
raise TypeError(type(sqltext))
    def __add__(self, sqltext):
        if isinstance(sqltext, SQLText):
            newone = SQLText()
            newone._segments += self._segments
            newone._segments += sqltext._segments
        elif isinstance(sqltext, str):
            segments = _sqlstr_parse(sqltext, sys._getframe(1).f_locals)
            newone = SQLText()
            newone._segments += self._segments
            newone._segments += segments
        else:
            raise TypeError(type(sqltext))
        return newone
def __rshift__(self, sqlblock):
if hasattr(sqlblock, '__lshift__'):
sqlblock.__lshift__(self)
return sqlblock
raise ValueError("SQL(' ') >> sqlblock")
def _join(self, *sqltexts, sep='', vars):
if not sqltexts:
            return self
sql_text_iter = iter(sqltexts)
sqltext = next(sql_text_iter, None)
if isinstance(sqltext, str):
segments = _sqlstr_parse(sqltext, vars)
elif isinstance(sqltext, SQLText):
segments = [] + sqltext._segments
else:
raise TypeError()
if segments and self._segments:
self._segments.append(SQLSegment(sep, vars))
self._segments += segments
for sqltext in sql_text_iter:
if isinstance(sqltext, str):
segments = _sqlstr_parse(sqltext, vars)
elif isinstance(sqltext, SQLText):
segments = [] + sqltext._segments
else:
raise TypeError()
if segments:
self._segments.append(SQLSegment(sep, vars))
self._segments += segments
return self
def clear(self):
self._segments = []
    def get_statement(self, *, params=None, many_params=None):
sql_text = ''
placeholders = []
var_counter = 0
for seg in self._segments:
if isinstance(seg, SQLSegment):
sql_text += seg.text
elif isinstance(seg, SQLPlaceholder):
placeholders.append(seg)
var_counter += 1
sql_text += f"${var_counter}"
if many_params is None:
assert params is None or isinstance(params, Mapping)
return sql_text, eval_param_vals(params, placeholders)
else:
assert isinstance(many_params, Sequence)
many_sql_vals = []
for params in many_params:
many_sql_vals.append(eval_param_vals(params, placeholders))
return sql_text + ";", many_sql_vals
def __str__(self):
return f"SQLText({str(self._segments)}])"
def eval_param_vals(params, placeholders):
sql_vals = []
for seg in placeholders:
localvars = {}
localvars.update(seg.vars)
if params:
localvars.update(params)
value = eval_expr(seg.field_name, localvars)
sql_vals += [value]
return sql_vals
class SQLSegmentBase:
def __init__(self, vars):
self.offset = (0, 0) # lineno, charpos at line
self.vars = vars # frame.f_lineno frame.f_code.co_filename
class SQLSegment(SQLSegmentBase):
def __init__(self, text, vars):
super().__init__(vars)
self.text = text
# compute the offset of this segment
lines = text.splitlines()
if lines:
offset_lineno = len(lines) - 1
offset_charpos = len(lines[offset_lineno])
self.offset = (offset_lineno, offset_charpos)
def __repr__(self):
return f"SQLSegment(text='{self.text}', offset={self.offset})"
class SQLPlaceholder(SQLSegmentBase):
def __init__(self, field_name, value, vars):
super().__init__(vars)
self.value = value
self.field_name = field_name
def __repr__(self):
return f"SQLPlaceholder(value='{self.value}', offset={self.offset})"
_formatter = string.Formatter()
def _sqlstr_parse(sqlstr, vars):
vars = dict(vars)
segments = []
for text, field_name, _, _ in _formatter.parse(sqlstr):
segments.append(SQLSegment(text, vars))
if not field_name:
continue
val = eval_expr(field_name, vars)
if isinstance(val, SQLText):
segments += val._segments
else:
seg = SQLPlaceholder(field_name, val, vars)
segments.append(seg)
return segments
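# An illustrative sketch: _sqlstr_parse("WHERE id = {uid}", {'uid': 7})
# yields [SQLSegment('WHERE id = '), SQLPlaceholder('uid', 7, ...)] -- the
# literal text becomes a SQLSegment and each {expression} becomes a
# SQLPlaceholder evaluated against the given variables.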
def eval_expr(expr, localvars):
try:
return eval(expr, None, localvars)
except Exception as exc:
errmsg = (f"{str(exc)}, while evaluating the expression "
f"'{expr}' with local variables: {localvars}")
        raise type(exc)(errmsg) from exc
def SQL(*sqlstrs, sep='', vars=None):
if vars is None:
vars = sys._getframe(1).f_locals
sqltext = SQLText()
sqltext._join(*sqlstrs, sep=sep, vars=vars)
return sqltext
__all__ = ['SQL']
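# Usage sketch (illustrative):
if __name__ == '__main__':
    name = 'alice'
    stmt = SQL("SELECT * FROM users WHERE name = {name}")
    text, vals = stmt.get_statement()
    print(text)  # SELECT * FROM users WHERE name = $1
    print(vals)  # ['alice']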
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
class VariantType:
def __init__(self,
variantType,
managedType,
emitAccessors=True,
isPrimitiveType=True,
unmanagedRepresentationType=None,
includeInUnionTypes=True,
getStatements=None,
setStatements=None,
critical=False):
self.emitAccessors = emitAccessors
self.variantType = variantType
self.managedType = managedType
self.isPrimitiveType = isPrimitiveType
        if unmanagedRepresentationType is None:
            self.unmanagedRepresentationType = managedType
        else:
            self.unmanagedRepresentationType = unmanagedRepresentationType
self.includeInUnionTypes = includeInUnionTypes
self.getStatements = getStatements
self.setStatements = setStatements
self.managedFieldName = "_" + self.variantType.lower()
firstChar = self.variantType[0]
self.name = self.variantType.lower().replace(firstChar.lower(), firstChar, 1)
self.accessorName = "As" + self.name
self.critical = critical
def write_UnionTypes(self, cw):
if not self.includeInUnionTypes: return
if self.unmanagedRepresentationType == "IntPtr":
cw.write('[SuppressMessage("Microsoft.Reliability", "CA2006:UseSafeHandleToEncapsulateNativeResources")]')
if self.managedFieldName == '_bstr':
cw.write('[SuppressMessage("Microsoft.Performance", "CA1823:AvoidUnusedPrivateFields")]')
cw.write("[FieldOffset(0)] internal %s %s;" % (self.unmanagedRepresentationType, self.managedFieldName))
def write_ToObject(self, cw):
cw.write("case VarEnum.VT_%s: return %s;" % (self.variantType, self.accessorName))
def write_accessor(self, cw, transparent):
        if not self.emitAccessors:
            return
cw.write("// VT_%s" % self.variantType)
cw.enter_block('public %s %s' % (self.managedType, self.accessorName))
# Getter
if not transparent and self.critical: gen_exposed_code_security(cw)
cw.enter_block("get")
cw.write("Debug.Assert(VariantType == VarEnum.VT_%s);" % self.variantType)
        if self.getStatements is None:
cw.write("return _typeUnion._unionTypes.%s;" % self.managedFieldName)
else:
for s in self.getStatements: cw.write(s)
cw.exit_block()
# Setter
if not transparent and self.critical: gen_exposed_code_security(cw)
cw.enter_block("set")
cw.write("Debug.Assert(IsEmpty); // The setter can only be called once as VariantClear might be needed otherwise")
cw.write("VariantType = VarEnum.VT_%s;" % self.variantType)
        if self.setStatements is None:
cw.write("_typeUnion._unionTypes.%s = value;" % self.managedFieldName)
else:
for s in self.setStatements: cw.write(s)
cw.exit_block()
cw.exit_block()
# Byref Setter
cw.writeline()
if not transparent: gen_exposed_code_security(cw)
cw.enter_block("public void SetAsByref%s(ref %s value)" % (self.name, self.unmanagedRepresentationType))
cw.write("Debug.Assert(IsEmpty); // The setter can only be called once as VariantClear might be needed otherwise")
cw.write("VariantType = (VarEnum.VT_%s | VarEnum.VT_BYREF);" % self.variantType)
cw.write("_typeUnion._unionTypes._byref = UnsafeMethods.Convert%sByrefToPtr(ref value);" % self.unmanagedRepresentationType)
cw.exit_block()
cw.writeline()
def write_accessor_propertyinfo(self, cw):
        if self.emitAccessors:
cw.write('case VarEnum.VT_%s: return typeof(Variant).GetProperty(nameof(Variant.%s));' % (self.variantType, self.accessorName))
def write_byref_setters(self, cw):
        if self.emitAccessors:
cw.write('case VarEnum.VT_%s: return typeof(Variant).GetMethod(nameof(Variant.SetAsByref%s));' % (self.variantType, self.name))
def write_ComToManagedPrimitiveTypes(self, cw):
wrapper_types = ["CY", "DISPATCH", "UNKNOWN", "ERROR"]
if not self.isPrimitiveType or (self.variantType in wrapper_types) : return
cw.write("dict[VarEnum.VT_%s] = typeof(%s);" % (self.variantType, self.managedType))
def write_IsPrimitiveType(self, cw):
if not self.isPrimitiveType: return
cw.write("case VarEnum.VT_%s:" % self.variantType)
def write_ConvertByrefToPtr(self, cw, transparent):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
if not transparent: gen_exposed_code_security(cw)
cw.write('[SuppressMessage("Microsoft.Design", "CA1045:DoNotPassTypesByReference")]')
if self.unmanagedRepresentationType == 'Int32':
cw.enter_block("public static unsafe IntPtr Convert%sByrefToPtr(ref %s value)" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType))
else:
cw.enter_block("internal static unsafe IntPtr Convert%sByrefToPtr(ref %s value)" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType))
cw.enter_block('fixed (%s *x = &value)' % self.unmanagedRepresentationType)
cw.write('AssertByrefPointsToStack(new IntPtr(x));')
cw.write('return new IntPtr(x);')
cw.exit_block()
cw.exit_block()
cw.write('')
def write_ConvertByrefToPtr_Outer(self, cw, transparent):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
if not transparent: gen_exposed_code_security(cw)
cw.write('[SuppressMessage("Microsoft.Design", "CA1045:DoNotPassTypesByReference")]')
cw.write("public static IntPtr Convert%sByrefToPtr(ref %s value) { return _Convert%sByrefToPtr(ref value); }" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType, self.unmanagedRepresentationType))
def write_ConvertByrefToPtrDelegates(self, cw):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
cw.write("private static readonly ConvertByrefToPtrDelegate<%s> _Convert%sByrefToPtr = (ConvertByrefToPtrDelegate<%s>)Delegate.CreateDelegate(typeof(ConvertByrefToPtrDelegate<%s>), _ConvertByrefToPtr.MakeGenericMethod(typeof(%s)));" % (5 * (self.unmanagedRepresentationType,)))
def gen_exposed_code_security(cw):
cw.write("#if CLR2")
cw.write("[PermissionSet(SecurityAction.LinkDemand, Unrestricted = true)]")
cw.write("#endif")
cw.write("[SecurityCritical]")
variantTypes = [
# VariantType('varEnum', 'managed_type')
VariantType('I1', "SByte"),
VariantType('I2', "Int16"),
VariantType('I4', "Int32"),
VariantType('I8', "Int64"),
VariantType('UI1', "Byte"),
VariantType('UI2', "UInt16"),
VariantType('UI4', "UInt32"),
VariantType('UI8', "UInt64"),
VariantType('INT', "IntPtr"),
VariantType('UINT', "UIntPtr"),
VariantType('BOOL', "Boolean",
unmanagedRepresentationType="Int16",
getStatements=["return _typeUnion._unionTypes._bool != 0;"],
setStatements=["_typeUnion._unionTypes._bool = value ? (Int16)(-1) : (Int16)0;"]),
VariantType("ERROR", "Int32"),
VariantType('R4', "Single"),
VariantType('R8', "Double"),
VariantType('DECIMAL', "Decimal",
includeInUnionTypes=False,
getStatements=["// The first byte of Decimal is unused, but usually set to 0",
"Variant v = this;",
"v._typeUnion._vt = 0;",
"return v._decimal;"],
setStatements=["_decimal = value;",
"// _vt overlaps with _decimal, and should be set after setting _decimal",
"_typeUnion._vt = (ushort)VarEnum.VT_DECIMAL;"]),
VariantType("CY", "Decimal",
unmanagedRepresentationType="Int64",
getStatements=["return Decimal.FromOACurrency(_typeUnion._unionTypes._cy);"],
setStatements=["_typeUnion._unionTypes._cy = Decimal.ToOACurrency(value);"]),
VariantType('DATE', "DateTime",
unmanagedRepresentationType="Double",
getStatements=["return DateTime.FromOADate(_typeUnion._unionTypes._date);"],
setStatements=["_typeUnion._unionTypes._date = value.ToOADate();"]),
VariantType('BSTR', "String",
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._bstr != IntPtr.Zero) {",
" return Marshal.PtrToStringBSTR(_typeUnion._unionTypes._bstr);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" Marshal.GetNativeVariantForObject(value, UnsafeMethods.ConvertVariantByrefToPtr(ref this));",
"}"
],
critical=True),
VariantType("UNKNOWN", "Object",
isPrimitiveType=False,
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._dispatch != IntPtr.Zero) {",
" return Marshal.GetObjectForIUnknown(_typeUnion._unionTypes._unknown);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" _typeUnion._unionTypes._unknown = Marshal.GetIUnknownForObject(value);",
"}"
],
critical=True),
VariantType("DISPATCH", "Object",
isPrimitiveType=False,
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._dispatch != IntPtr.Zero) {",
" return Marshal.GetObjectForIUnknown(_typeUnion._unionTypes._dispatch);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" _typeUnion._unionTypes._unknown = GetIDispatchForObject(value);",
"}"
],
critical=True),
VariantType("VARIANT", "Object",
emitAccessors=False,
isPrimitiveType=False,
unmanagedRepresentationType="Variant",
includeInUnionTypes=False, # will use "this"
getStatements=["return Marshal.GetObjectForNativeVariant(UnsafeMethods.ConvertVariantByrefToPtr(ref this));"],
setStatements=["UnsafeMethods.InitVariantForObject(value, ref this);"],
critical=True)
]
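# An illustrative note: VariantType('I4', "Int32") above makes
# write_UnionTypes emit "[FieldOffset(0)] internal Int32 _i4;" and
# write_accessor emit the AsI4 property with its Debug.Assert guards.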
managed_types_to_variant_types_add = [
("Char", "UI2"),
("CurrencyWrapper", "CY"),
("ErrorWrapper", "ERROR"),
]
def gen_UnionTypes(cw):
for variantType in variantTypes:
variantType.write_UnionTypes(cw)
def gen_ToObject(cw):
for variantType in variantTypes:
variantType.write_ToObject(cw)
def gen_accessors(transparent):
def gen_accessors(cw):
for variantType in variantTypes:
variantType.write_accessor(cw, transparent)
return gen_accessors
def gen_accessor_propertyinfo(cw):
for variantType in variantTypes:
variantType.write_accessor_propertyinfo(cw)
def gen_byref_setters(cw):
for variantType in variantTypes:
variantType.write_byref_setters(cw)
def gen_ComToManagedPrimitiveTypes(cw):
for variantType in variantTypes:
variantType.write_ComToManagedPrimitiveTypes(cw)
def gen_ManagedToComPrimitiveTypes(cw):
import System
import clr
# build inverse map
type_map = {}
for variantType in variantTypes:
# take them in order, first one wins ... handles ERROR and INT32 conflict
if variantType.isPrimitiveType and variantType.managedType not in type_map:
type_map[variantType.managedType] = variantType.variantType
for managedType, variantType in managed_types_to_variant_types_add:
type_map[managedType] = variantType
def is_system_type(name):
t = getattr(System, name, None)
return t and System.Type.GetTypeCode(t) not in [System.TypeCode.Empty, System.TypeCode.Object]
system_types = filter(is_system_type, type_map.keys())
system_types = sorted(system_types, key=lambda name: int(System.Type.GetTypeCode(getattr(System, name))))
other_types = sorted(set(type_map.keys()).difference(set(system_types)))
    # switch from system types
cw.enter_block("switch (Type.GetTypeCode(argumentType))")
for t in system_types:
cw.write("""case TypeCode.%(code)s:
primitiveVarEnum = VarEnum.VT_%(vt)s;
return true;""", code = System.Type.GetTypeCode(getattr(System, t)).ToString(), vt = type_map[t])
cw.exit_block()
# if statements from the rest
for t in other_types:
clrtype = getattr(System, t, None)
if not clrtype: clrtype = getattr(System.Runtime.InteropServices, t, None)
clrtype = clr.GetClrType(clrtype)
cw.write("""
if (argumentType == typeof(%(type)s)) {
primitiveVarEnum = VarEnum.VT_%(vt)s;
return true;
}""", type = clrtype.Name, vt = type_map[t])
def gen_IsPrimitiveType(cw):
for variantType in variantTypes:
variantType.write_IsPrimitiveType(cw)
def gen_ConvertByrefToPtr(transparent):
def gen_ConvertByrefToPtr(cw):
for variantType in variantTypes:
if transparent:
variantType.write_ConvertByrefToPtr_Outer(cw, transparent)
else:
variantType.write_ConvertByrefToPtr(cw, transparent)
return gen_ConvertByrefToPtr
def gen_ConvertByrefToPtrDelegates(cw):
for variantType in variantTypes:
variantType.write_ConvertByrefToPtrDelegates(cw)
def main():
return generate(
("Convert ByRef Delegates", gen_ConvertByrefToPtrDelegates),
("Outer Managed To COM Primitive Type Map", gen_ManagedToComPrimitiveTypes),
("Outer Variant union types", gen_UnionTypes),
("Outer Variant ToObject", gen_ToObject),
("Outer Variant accessors", gen_accessors(True)),
("Outer Variant accessors PropertyInfos", gen_accessor_propertyinfo),
("Outer Variant byref setter", gen_byref_setters),
("Outer ComToManagedPrimitiveTypes", gen_ComToManagedPrimitiveTypes),
("Outer Variant IsPrimitiveType", gen_IsPrimitiveType),
("Outer ConvertByrefToPtr", gen_ConvertByrefToPtr(True)),
#TODO: we don't build ndp\fx\src\Dynamic any more for IronPython
#("Managed To COM Primitive Type Map", gen_ManagedToComPrimitiveTypes),
#("Variant union types", gen_UnionTypes),
#("Variant ToObject", gen_ToObject),
#("Variant accessors", gen_accessors(False)),
#("Variant accessors PropertyInfos", gen_accessor_propertyinfo),
#("Variant byref setter", gen_byref_setters),
#("ComToManagedPrimitiveTypes", gen_ComToManagedPrimitiveTypes),
#("Variant IsPrimitiveType", gen_IsPrimitiveType),
#("ConvertByrefToPtr", gen_ConvertByrefToPtr(False)),
)
if __name__ == "__main__":
main()
|
|
from card import Card
def create_table_if_not_exists(conn, cursor):
query = """
CREATE TABLE IF NOT EXISTS cards (
front TEXT NOT NULL,
back TEXT NOT NULL,
score INTEGER NOT NULL,
last_viewed TIMESTAMP NOT NULL
)
"""
cursor.execute(query)
conn.commit()
def check_if_empty(cursor):
query = """
SELECT EXISTS(
SELECT 1
FROM cards
)
"""
cursor.execute(query)
query_result = cursor.fetchone()
    return query_result[0] == 0
def insert(conn, cursor, card):
args = card.to_tuple()
query = """
INSERT INTO cards (front, back, score, last_viewed)
VALUES (?, ?, ?, ?)
"""
cursor.execute(query, args)
conn.commit()
def insert_flipped_card(conn, cursor, card):
args = (card.back, card.front, card.score, card.last_viewed)
query = """
INSERT INTO cards (front, back, score, last_viewed)
VALUES (?, ?, ?, ?)
"""
cursor.execute(query, args)
conn.commit()
def select_one_by_score(cursor, score):
query = """
SELECT *
FROM cards
WHERE score = ?
ORDER BY last_viewed
LIMIT 1
"""
    cursor.execute(query, (score,))
row = cursor.fetchone()
    if row is None:
        return None
    return Card(*row)
# MATCHES is a custom function defined in db_connection.py
def select_by_regex(cursor, regex):
query = """
SELECT *
FROM cards
WHERE MATCHES(?, front)
OR MATCHES(?, back)
ORDER BY last_viewed
"""
cursor.execute(query, (regex, regex))
results = cursor.fetchall()
return results
def select_by_regex_front(cursor, regex):
query = """
SELECT *
FROM cards
WHERE MATCHES(?, front)
ORDER BY last_viewed
"""
cursor.execute(query, (regex,))
results = cursor.fetchall()
return results
def select_by_regex_back(cursor, regex):
query = """
SELECT *
FROM cards
WHERE MATCHES(?, back)
ORDER BY last_viewed
"""
cursor.execute(query, (regex,))
results = cursor.fetchall()
return results
def select_by_score(cursor, score):
query = """
SELECT *
FROM cards
WHERE score = ?
ORDER BY last_viewed
"""
cursor.execute(query, (score,))
results = cursor.fetchall()
return results
def select_by_last_viewed(cursor):
query = """
SELECT *
FROM cards
ORDER BY last_viewed
"""
cursor.execute(query)
results = cursor.fetchall()
return results
def select_by_last_viewed_reverse(cursor):
query = """
SELECT *
FROM cards
ORDER BY last_viewed DESC
"""
cursor.execute(query)
results = cursor.fetchall()
return results
def select_flipped_card(cursor, card):
args = (card.back, card.front)
query = """
SELECT *
FROM cards
WHERE front = ?
AND back = ?
"""
cursor.execute(query, args)
result = cursor.fetchall()
    if not result:
        return None
    return Card(*result[0])
def delete(conn, cursor, card):
args = card.to_tuple()
query = """
DELETE FROM cards
WHERE front = ?
AND back = ?
AND score = ?
AND last_viewed = ?
"""
cursor.execute(query, args)
conn.commit()
def delete_flipped_card(conn, cursor, card):
args = (card.back, card.front)
query = """
DELETE FROM cards
WHERE front = ?
AND back = ?
"""
cursor.execute(query, args)
conn.commit()
def is_two_way_card(cursor, card):
args = (card.back, card.front)
query = """
SELECT EXISTS(
SELECT *
FROM cards
WHERE front = ?
AND back = ?
)
"""
cursor.execute(query, args)
query_result = cursor.fetchone()
    return query_result[0] != 0
def count(cursor):
query = """
SELECT count(*)
FROM cards
"""
cursor.execute(query)
result = cursor.fetchone()
return int(result[0])
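# Minimal usage sketch (illustrative; assumes Card(front, back, score,
# last_viewed) with a to_tuple() method, matching the Card(*row) and
# card.to_tuple() calls above):
if __name__ == '__main__':
    import sqlite3
    from datetime import datetime
    conn = sqlite3.connect(':memory:')
    cursor = conn.cursor()
    create_table_if_not_exists(conn, cursor)
    insert(conn, cursor, Card('bonjour', 'hello', 0, datetime.now()))
    print(check_if_empty(cursor))  # False
    print(count(cursor))           # 1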
|
|
import ply.lex as lex
import ply.yacc as yacc
import re
# Classes
class Program(object):
def __init__(self, items):
self.items = items
def get_lookup(self):
return dict((item.name, item) for item in self.items
if isinstance(item, MacroDefinition))
def preprocess(self, lookup):
items = []
count = 0
line = 1
for item in self.items:
newlines = item.line - line
if newlines:
items.append('\n' * (newlines))
line = item.line
if isinstance(item, MacroCall):
if item.name not in lookup:
raise Exception('Call to undefined macro: %s'
% item.name)
macro = lookup[item.name]
items.extend(macro.invoke(item.arguments))
count += 1
elif isinstance(item, Token):
if item.name in lookup:
macro = lookup[item.name]
items.extend(macro.invoke(()))
count += 1
else:
items.append(item.name)
lines = ' '.join(items).split('\n')
result = '\n'.join(line.strip() for line in lines)
return count, result
class MacroDefinition(object):
def __init__(self, line, name, parameters, tokens):
self.line = line
self.name = name
self.parameters = parameters
self.tokens = tokens
def invoke(self, arguments):
if len(arguments) != len(self.parameters):
raise Exception('Incorrect number of arguments for macro: %s'
% self.name)
lookup = dict(zip(self.parameters, arguments))
tokens = []
for token in self.tokens:
tokens.extend(lookup.get(token.name, [token]))
result = [token.name for token in tokens]
return result
class MacroCall(object):
def __init__(self, line, name, arguments):
self.line = line
self.name = name
self.arguments = arguments
class Token(object):
def __init__(self, line, name):
self.line = line
self.name = name
# Lexer Rules
tokens = [
'MACRO',
'COMMA',
'LBRACE',
'RBRACE',
'LBRACK',
'RBRACK',
'LPAREN',
'RPAREN',
'STRING',
'ID',
'OTHER',
]
t_ignore = ' \t\r'
t_ignore_COMMENT = r';.*'
t_MACRO = r'\#macro'
t_COMMA = r'\,'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LBRACK = r'\['
t_RBRACK = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_STRING = r'"[^"]*"'
t_ID = r'[_a-zA-Z][_a-zA-Z0-9]*'
t_OTHER = r'[^_a-zA-Z\s\;\,\{\}\[\]\(\)\"\#][^\s\;\,\{\}\[\]\(\)\"\#]*'
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(t):
raise Exception(t)
# Parser Rules
def p_program(t):
'program : items'
t[0] = Program(t[1])
def p_items1(t):
'items : item items'
t[0] = (t[1],) + t[2]
def p_items2(t):
'items : item'
t[0] = (t[1],)
def p_item(t):
'''item : macro_definition
| macro_call
| token'''
t[0] = t[1]
def p_macro_definition(t):
'macro_definition : MACRO ID parameter_list LBRACE tokens RBRACE'
t[0] = MacroDefinition(t.lineno(1), t[2], t[3], t[5])
def p_parameter_list1(t):
'parameter_list : LPAREN parameters RPAREN'
t[0] = t[2]
def p_parameter_list2(t):
'parameter_list : empty'
t[0] = ()
def p_parameters1(t):
'parameters : ID COMMA parameters'
t[0] = (t[1],) + t[3]
def p_parameters2(t):
'parameters : ID'
t[0] = (t[1],)
def p_macro_call(t):
'macro_call : ID argument_list'
t[0] = MacroCall(t.lineno(1), t[1], t[2])
def p_argument_list1(t):
'argument_list : LPAREN arguments RPAREN'
t[0] = t[2]
def p_argument_list2(t):
'argument_list : empty'
t[0] = ()
def p_arguments1(t):
'arguments : argument COMMA arguments'
t[0] = (t[1],) + t[3]
def p_arguments2(t):
'arguments : argument'
t[0] = (t[1],)
def p_argument1(t):
'argument : argument_token argument'
t[0] = (t[1],) + t[2]
def p_argument2(t):
'argument : argument_token'
t[0] = (t[1],)
def p_argument_token(t):
'''argument_token : LBRACK
| RBRACK
| STRING
| ID
| OTHER'''
t[0] = Token(t.lineno(1), t[1])
def p_tokens1(t):
'tokens : token tokens'
t[0] = (t[1],) + t[2]
def p_tokens2(t):
'tokens : token'
t[0] = (t[1],)
def p_token(t):
'''token : COMMA
| LBRACK
| RBRACK
| LPAREN
| RPAREN
| STRING
| ID
| OTHER'''
t[0] = Token(t.lineno(1), t[1])
def p_empty(t):
'empty :'
pass
def p_error(t):
raise Exception(t)
# Preprocessor Functions
def create_lexer():
lexer = lex.lex()
return lexer
def create_parser():
parser = yacc.yacc(debug=False, write_tables=False)
return parser
LEXER = create_lexer()
PARSER = create_parser()
def include_files(text):
lines = []
pattern = re.compile(r'\#include\s+\"([^"]+)\"')
for line in text.split('\n'):
match = pattern.match(line.strip())
if match is None:
lines.append(line)
else:
path = match.group(1)
with open(path) as fp:
lines.extend(fp.read().split('\n'))
result = '\n'.join(lines)
return result
def convert_defines(text):
lines = []
pattern = re.compile(r'\#define\s+([_a-zA-Z][_a-zA-Z0-9]*)\s+(.+)')
for line in text.split('\n'):
match = pattern.match(line.strip())
if match is None:
lines.append(line)
else:
name = match.group(1)
value = match.group(2)
macro = '#macro %s { %s }' % (name, value)
            print(macro)
lines.append(macro)
result = '\n'.join(lines)
return result
def preprocess(text):
text = convert_defines(text)
lookup = None
while True:
LEXER.lineno = 1
program = PARSER.parse(text)
if lookup is None:
lookup = program.get_lookup()
count, text = program.preprocess(lookup)
if count == 0:
break
return text
def preprocess_file(path):
with open(path) as fp:
text = fp.read()
return preprocess(text)
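# Example (an illustrative sketch): defining and expanding a simple macro.
if __name__ == '__main__':
    source = '#macro GREETING { hello world }\nGREETING'
    print(preprocess(source))  # a blank line, then: hello world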
|
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Predefined step definitions for handling dialog interaction with Mycroft for
use with behave.
"""
from os.path import join, exists, basename
from glob import glob
import re
import time
from behave import given, when, then
from mycroft.messagebus import Message
from mycroft.audio import wait_while_speaking
from test.integrationtests.voight_kampff import (mycroft_responses, then_wait,
then_wait_fail)
def find_dialog(skill_path, dialog, lang):
"""Check the usual location for dialogs.
TODO: subfolders
"""
if exists(join(skill_path, 'dialog')):
return join(skill_path, 'dialog', lang, dialog)
else:
return join(skill_path, 'locale', lang, dialog)
def load_dialog_file(dialog_path):
"""Load dialog files and get the contents."""
with open(dialog_path) as f:
lines = f.readlines()
return [l.strip().lower() for l in lines
if l.strip() != '' and l.strip()[0] != '#']
def load_dialog_list(skill_path, dialog, lang):
    """Load dialog from files into a single list.
    Args:
        skill_path (str): path to the skill directory
        dialog (str): dialog name to load
        lang (str): language code to use
    Returns:
        tuple (list of expanded dialog strings, debug string)
    """
    dialog_path = find_dialog(skill_path, dialog, lang)
    debug = 'Opening {}\n'.format(dialog_path)
    return load_dialog_file(dialog_path), debug
def dialog_from_sentence(sentence, skill_path, lang):
"""Find dialog file from example sentence.
Args:
sentence (str): Text to match
skill_path (str): path to skill directory
lang (str): language code to use
Returns (str): Dialog file best matching the sentence.
"""
dialog_paths = join(skill_path, 'dialog', lang, '*.dialog')
best = (None, 0)
for path in glob(dialog_paths):
patterns = load_dialog_file(path)
match, _ = _match_dialog_patterns(patterns, sentence.lower())
if match is not False:
if len(patterns[match]) > best[1]:
best = (path, len(patterns[match]))
if best[0] is not None:
return basename(best[0])
else:
return None
def _match_dialog_patterns(dialogs, sentence):
"""Match sentence against a list of dialog patterns.
Returns index of found match.
"""
# Allow custom fields to be anything
dialogs = [re.sub(r'{.*?\}', r'.*', dia) for dia in dialogs]
# Remove left over '}'
dialogs = [re.sub(r'\}', r'', dia) for dia in dialogs]
dialogs = [re.sub(r' .* ', r' .*', dia) for dia in dialogs]
    # Merge consecutive .*'s into a single .*
dialogs = [re.sub(r'\.\*( \.\*)+', r'.*', dia) for dia in dialogs]
# Remove double whitespaces
dialogs = ['^' + ' '.join(dia.split()) for dia in dialogs]
debug = 'MATCHING: {}\n'.format(sentence)
for index, regex in enumerate(dialogs):
match = re.match(regex, sentence)
debug += '---------------\n'
debug += '{} {}\n'.format(regex, match is not None)
if match:
return index, debug
else:
return False, debug
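# An illustrative note on the normalization above: placeholder fields such
# as "{time}" (and some interior words) are collapsed into ".*" wildcards,
# each pattern is anchored at the start with "^", and the candidate regexes
# are tried in order until one matches the utterance.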
@given('an english speaking user')
def given_english(context):
context.lang = 'en-us'
@given('a {timeout} seconds timeout')
@given('a {timeout} second timeout')
def given_timeout(context, timeout):
"""Set the timeout for the steps in this scenario."""
context.step_timeout = float(timeout)
@given('a {timeout} minutes timeout')
@given('a {timeout} minute timeout')
def given_timeout_minutes(context, timeout):
    """Set the timeout (in minutes) for the steps in this scenario."""
    context.step_timeout = float(timeout) * 60
@when('the user says "{text}"')
def when_user_says(context, text):
context.bus.emit(Message('recognizer_loop:utterance',
data={'utterances': [text],
'lang': context.lang,
'session': '',
'ident': time.time()},
context={'client_name': 'mycroft_listener'}))
@then('"{skill}" should reply with dialog from "{dialog}"')
def then_dialog(context, skill, dialog):
def check_dialog(message):
utt_dialog = message.data.get('meta', {}).get('dialog')
return (utt_dialog == dialog.replace('.dialog', ''), '')
passed, debug = then_wait('speak', check_dialog, context)
if not passed:
assert_msg = debug
assert_msg += mycroft_responses(context)
assert passed, assert_msg or 'Mycroft didn\'t respond'
@then('"{skill}" should not reply')
def then_do_not_reply(context, skill):
def check_all_dialog(message):
        msg_skill = message.data.get('meta', {}).get('skill')
utt = message.data['utterance'].lower()
skill_responded = skill == msg_skill
debug_msg = ("{} responded with '{}'. \n".format(skill, utt)
if skill_responded else '')
return (skill_responded, debug_msg)
passed, debug = then_wait_fail('speak', check_all_dialog, context)
if not passed:
assert_msg = debug
assert_msg += mycroft_responses(context)
assert passed, assert_msg or '{} responded'.format(skill)
@then('"{skill}" should reply with "{example}"')
def then_example(context, skill, example):
skill_path = context.msm.find_skill(skill).path
dialog = dialog_from_sentence(example, skill_path, context.lang)
print('Matching with the dialog file: {}'.format(dialog))
assert dialog is not None, 'No matching dialog...'
then_dialog(context, skill, dialog)
@then('"{skill}" should reply with anything')
def then_anything(context, skill):
def check_any_messages(message):
debug = ''
result = message is not None
return (result, debug)
    passed, _ = then_wait('speak', check_any_messages, context)
assert passed, 'No speech received at all'
@then('"{skill}" should reply with exactly "{text}"')
def then_exactly(context, skill, text):
def check_exact_match(message):
utt = message.data['utterance'].lower()
debug = 'Comparing {} with expected {}\n'.format(utt, text)
result = utt == text.lower()
return (result, debug)
passed, debug = then_wait('speak', check_exact_match, context)
if not passed:
assert_msg = debug
assert_msg += mycroft_responses(context)
assert passed, assert_msg
@then('mycroft reply should contain "{text}"')
def then_contains(context, text):
def check_contains(message):
utt = message.data['utterance'].lower()
debug = 'Checking if "{}" contains "{}"\n'.format(utt, text)
result = text.lower() in utt
return (result, debug)
passed, debug = then_wait('speak', check_contains, context)
if not passed:
assert_msg = 'No speech contained the expected content'
assert_msg += mycroft_responses(context)
assert passed, assert_msg
@then('the user replies with "{text}"')
@then('the user replies "{text}"')
@then('the user says "{text}"')
def then_user_follow_up(context, text):
"""Send a user response after being prompted by device.
The sleep after the device is finished speaking is to address a race
condition in the MycroftSkill base class conversational code. It can
be removed when the race condition is addressed.
"""
wait_while_speaking()
time.sleep(2)
context.bus.emit(Message('recognizer_loop:utterance',
data={'utterances': [text],
'lang': context.lang,
'session': '',
'ident': time.time()},
context={'client_name': 'mycroft_listener'}))
@then('mycroft should send the message "{message_type}"')
def then_messagebus_message(context, message_type):
"""Verify a specific message is sent."""
def check_dummy(message):
"""We are just interested in the message data, just the type."""
return True, ""
message_found, _ = then_wait(message_type, check_dummy, context)
assert message_found, "No matching message received."
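# Example scenario wiring these steps together (illustrative only; the skill
# and dialog names are hypothetical):
#
#   Scenario: current time
#     Given an english speaking user
#     And a 10 seconds timeout
#     When the user says "what time is it"
#     Then "mycroft-date-time" should reply with dialog from "time.current.dialog"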
|
|
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.testutils import APITestCase, SnubaTestCase
from django.core.urlresolvers import reverse
class DiscoverQueryTest(APITestCase, SnubaTestCase):
def setUp(self):
super(DiscoverQueryTest, self).setUp()
self.now = datetime.now()
one_second_ago = self.now - timedelta(seconds=1)
self.login_as(user=self.user, superuser=False)
self.org = self.create_organization(owner=self.user, name="foo")
self.project = self.create_project(name="bar", organization=self.org)
self.other_project = self.create_project(name="other")
self.group = self.create_group(project=self.project, short_id=20)
self.event = self.create_event(
group=self.group,
platform="python",
datetime=one_second_ago,
tags={"environment": "production", "sentry:release": "foo", "error.custom": "custom"},
data={
"message": "message!",
"exception": {
"values": [
{
"type": "ValidationError",
"value": "Bad request",
"mechanism": {"type": "1", "value": "1"},
"stacktrace": {
"frames": [
{
"function": "?",
"filename": "http://localhost:1337/error.js",
"lineno": 29,
"colno": 3,
"in_app": True,
}
]
},
}
]
},
},
)
def test(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform.name"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["message"] == "message!"
assert response.data["data"][0]["platform.name"] == "python"
def test_relative_dates(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform.name"],
"range": "1d",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["message"] == "message!"
assert response.data["data"][0]["platform.name"] == "python"
def test_invalid_date_request(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform"],
"range": "1d",
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
},
)
assert response.status_code == 400, response.content
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform"],
"statsPeriodStart": "7d",
"statsPeriodEnd": "1d",
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
},
)
assert response.status_code == 400, response.content
def test_conditional_fields(self):
with self.feature("organizations:discover"):
one_second_ago = self.now - timedelta(seconds=1)
self.create_event(
group=self.group,
platform="javascript",
datetime=one_second_ago,
tags={"environment": "production", "sentry:release": "bar"},
data={},
)
self.create_event(
group=self.group,
platform="javascript",
datetime=one_second_ago,
tags={"environment": "production", "sentry:release": "baz"},
data={},
)
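            # The conditionField below is a Snuba/ClickHouse-style expression:
            #   if(in(release, tuple('foo')), release, 'other') AS release
            # so the 'foo' release from setUp stays as-is while the 'bar' and
            # 'baz' events created above are bucketed together under 'other'.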
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"aggregations": [["count()", None, "count"]],
"conditionFields": [
[
"if",
[["in", ["release", "tuple", ["'foo'"]]], "release", "'other'"],
"release",
]
],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"groupby": ["time", "release"],
"rollup": 86400,
"limit": 1000,
"orderby": "-time",
"range": None,
},
)
assert response.status_code == 200, response.content
            # rollup is one day and the start/end window spans only ten
            # seconds, so all events land in a single one-day bucket
assert len(response.data["data"]) == 2
for data in response.data["data"]:
# note this "release" key represents the alias for the column condition
# and is also used in `groupby`, it is NOT the release tag
if data["release"] == "foo":
assert data["count"] == 1
elif data["release"] == "other":
assert data["count"] == 2
def test_invalid_range_value(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform"],
"range": "1x",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 400, response.content
def test_invalid_aggregation_function(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform"],
"aggregations": [["test", "test", "test"]],
"range": "14d",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 400, response.content
def test_boolean_condition(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform.name", "stack.in_app"],
"conditions": [["stack.in_app", "=", True]],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["message"] == "message!"
assert response.data["data"][0]["platform.name"] == "python"
def test_strip_double_quotes_in_condition_strings(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message"],
"conditions": [["message", "=", '"message!"']],
"range": "14d",
"orderby": "-timestamp",
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["message"] == "message!"
def test_array_join(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "error.type"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now() + timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["error.type"] == "ValidationError"
def test_array_condition_equals(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"conditions": [["error.type", "=", "ValidationError"]],
"fields": ["message"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
def test_array_condition_not_equals(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"conditions": [["error.type", "!=", "ValidationError"]],
"fields": ["message"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 0
def test_array_condition_custom_tag(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"conditions": [["error.custom", "!=", "custom"]],
"fields": ["message"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 0
def test_select_project_name(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["project.name"],
"range": "14d",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert (response.data["data"][0]["project.name"]) == "bar"
def test_groupby_project_name(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"aggregations": [["count()", "", "count"]],
"fields": ["project.name"],
"range": "14d",
"orderby": "-count",
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert (response.data["data"][0]["project.name"]) == "bar"
assert (response.data["data"][0]["count"]) == 1
def test_zerofilled_dates_when_rollup_relative(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"aggregations": [["count()", "", "count"]],
"fields": ["project.name"],
"groupby": ["time"],
"orderby": "time",
"range": "5d",
"rollup": 86400,
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
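            # A "5d" range at a one-day rollup yields 6 time buckets (the
            # endpoints are inclusive); days without events are zero-filled.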
assert len(response.data["data"]) == 6
assert (response.data["data"][5]["time"]) > response.data["data"][4]["time"]
assert (response.data["data"][5]["project.name"]) == "bar"
assert (response.data["data"][5]["count"]) == 1
def test_zerofilled_dates_when_rollup_absolute(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"aggregations": [["count()", "", "count"]],
"fields": ["project.name"],
"groupby": ["time"],
"orderby": "-time",
"start": (self.now - timedelta(seconds=300)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": self.now.strftime("%Y-%m-%dT%H:%M:%S"),
"rollup": 60,
"range": None,
},
)
assert response.status_code == 200, response.content
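            # 300 seconds at a 60-second rollup likewise yields 6 buckets,
            # with minutes that saw no events zero-filled.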
assert len(response.data["data"]) == 6
assert (response.data["data"][0]["time"]) > response.data["data"][2]["time"]
assert (response.data["data"][0]["project.name"]) == "bar"
assert (response.data["data"][0]["count"]) == 1
def test_uniq_project_name(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"aggregations": [["uniq", "project.name", "uniq_project_name"]],
"range": "14d",
"orderby": "-uniq_project_name",
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert (response.data["data"][0]["uniq_project_name"]) == 1
def test_meta_types(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["project.id", "project.name"],
"aggregations": [["count()", "", "count"]],
"range": "14d",
"orderby": "-count",
"start": None,
"end": None,
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == [
{"name": "project.id", "type": "integer"},
{"name": "project.name", "type": "string"},
{"name": "count", "type": "integer"},
]
def test_no_feature_access(self):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.project.id],
"fields": ["message", "platform"],
"range": "14d",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 404, response.content
def test_invalid_project(self):
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.org.slug])
response = self.client.post(
url,
{
"projects": [self.other_project.id],
"fields": ["message", "platform"],
"range": "14d",
"orderby": "-timestamp",
"start": None,
"end": None,
},
)
assert response.status_code == 403, response.content
def test_superuser(self):
self.new_org = self.create_organization(name="foo_new")
self.new_project = self.create_project(name="bar_new", organization=self.new_org)
self.login_as(user=self.user, superuser=True)
with self.feature("organizations:discover"):
url = reverse("sentry-api-0-discover-query", args=[self.new_org.slug])
response = self.client.post(
url,
{
"projects": [self.new_project.id],
"fields": ["message", "platform"],
"start": (datetime.now() - timedelta(seconds=10)).strftime("%Y-%m-%dT%H:%M:%S"),
"end": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%S"),
"orderby": "-timestamp",
"range": None,
},
)
assert response.status_code == 200, response.content
|
|
from __future__ import unicode_literals
import datetime
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions, management
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement,
ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson,
TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
    def test_inheritance_new_table(self):
        """
        The StatusPerson model should have its own table (it uses ORM-level
        inheritance).
        """
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(
Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id + 1
)
self.assertRaises(
Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
StatusPerson.objects.create(name='Bazza Jr.')
StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id + 1
)
def test_abc(self):
"""
        A proxy model cannot be based on an abstract model.
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
        A proxy model must have exactly one concrete base class.
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
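    # For contrast, a minimal *valid* proxy declaration (sketch only, not
    # registered here) has exactly one concrete base and adds no fields:
    #
    #   class PersonProxy(Person):
    #       class Meta:
    #           proxy = True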
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
# don't register this model in the app_cache for the current app,
# otherwise the check fails when other tests are being run.
app_label = 'no_such_app'
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
hint=None,
obj=None,
id='models.E017',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
def test_swappable(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['proxy_models'].models.copy()
try:
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
finally:
apps.app_configs['proxy_models'].models = _old_models
apps.all_models['proxy_models'] = _old_models
apps.clear_cache()
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
try:
Permission.objects.get(name="May display users information")
except Permission.DoesNotExist:
self.fail("The permission 'May display users information' has not been created")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(
name='Contributor', status='contrib')
with self.assertRaises(exceptions.FieldError):
            TrackerUser.objects.filter(issue=None)
self.assertQuerysetEqual(
ProxyTrackerUser.objects.filter(issue=None),
[tu], lambda x: x
)
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(name='Contributor',
status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta',
assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
status='proxy')
Improvement.objects.create(summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0])
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='proxy_models.urls',)
class ProxyModelAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = AuthUser.objects.create(
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
def test_cascade_delete_proxy_model_admin_warning(self):
"""
        Deleting a proxy object in the admin should warn about cascade
        deletion of objects that reference the underlying concrete model.
"""
tracker_user = TrackerUser.objects.all()[0]
base_user = BaseUser.objects.all()[0]
issue = Issue.objects.all()[0]
with self.assertNumQueries(7):
collector = admin.utils.NestedObjects('default')
collector.collect(ProxyTrackerUser.objects.all())
self.assertIn(tracker_user, collector.edges.get(None, ()))
self.assertIn(base_user, collector.edges.get(None, ()))
self.assertIn(issue, collector.edges.get(tracker_user, ()))
def test_delete_str_in_model_admin(self):
"""
Test if the admin delete page shows the correct string representation
for a proxy model.
"""
user = TrackerUser.objects.get(name='Django Pony')
proxy = ProxyTrackerUser.objects.get(name='Django Pony')
user_str = 'Tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
)
proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
)
self.client.login(username='super', password='secret')
response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, user_str)
response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, proxy_str)
self.client.logout()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import os
import numpy as np
from numpy.testing import utils
from .. import projections
from ..parameters import InputParameterError
from ...io import fits
from ... import wcs
from ...utils.data import get_pkg_data_filename
from ...tests.helper import pytest
from ...extern.six.moves import range, zip
def test_Projection_properties():
projection = projections.Sky2Pix_PlateCarree()
assert projection.n_inputs == 2
assert projection.n_outputs == 2
PIX_COORDINATES = [-10, 30]
pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
# http://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'], wcslibout['theta'])
utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES)
utils.assert_almost_equal(np.asarray(phi), wcs_phi)
utils.assert_almost_equal(np.asarray(theta), wcs_theta)
@pytest.mark.parametrize(('code',), pars)
def test_projection_default(code):
"""Check astropy model eval with default parameters"""
# Just makes sure that the default parameter values are reasonable
# and accepted by wcslib.
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model()
x, y = tinv(45, 45)
model = getattr(projections, 'Pix2Sky_' + code)
tinv = model()
x, y = tinv(0, 0)
class TestZenithalPerspective(object):
"""Test Zenithal Perspective projection"""
def setup_class(self):
ID = 'AZP'
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)
def test_AZP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
utils.assert_almost_equal(np.asarray(phi), wcs_phi)
utils.assert_almost_equal(np.asarray(theta), wcs_theta)
def test_AZP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
class TestCylindricalPerspective(object):
"""Test cylindrical perspective projection"""
def setup_class(self):
ID = "CYP"
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)
def test_CYP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
utils.assert_almost_equal(np.asarray(phi), wcs_phi)
utils.assert_almost_equal(np.asarray(theta), wcs_theta)
def test_CYP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
utils.assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
utils.assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_AffineTransformation2D():
# Simple test with a scale and translation
model = projections.AffineTransformation2D(
matrix=[[2, 0], [0, 2]], translation=[1, 1])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
new_rect = np.vstack(model(x, y)).T
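    # Each vertex is mapped as x' = M x + t, e.g. (1, 3) -> (2*1 + 1, 2*3 + 1)
    # = (3, 7), which is what the expected corners below encode.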
assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])
def test_AffineTransformation2D_inverse():
# Test non-invertible model
model1 = projections.AffineTransformation2D(
matrix=[[1, 1], [1, 1]])
with pytest.raises(InputParameterError):
model1.inverse
model2 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
x_new, y_new = model2.inverse(*model2(x, y))
utils.assert_allclose([x, y], [x_new, y_new], atol=1e-10)
def test_c_projection_striding():
# This is just a simple test to make sure that the striding is
# handled correctly in the projection C extension
coords = np.arange(10).reshape((5, 2))
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
phi, theta = model(coords[:, 0], coords[:, 1])
utils.assert_almost_equal(
phi,
[0., 2.2790416, 4.4889294, 6.6250643, 8.68301])
utils.assert_almost_equal(
theta,
[-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629])
def test_c_projections_shaped():
nx, ny = (5, 2)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)
model = projections.Pix2Sky_TAN()
phi, theta = model(xv, yv)
utils.assert_allclose(
phi,
[[0., 90., 90., 90., 90.,],
[180., 165.96375653, 153.43494882, 143.13010235, 135.]])
utils.assert_allclose(
theta,
[[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152],
[89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]])
|
|
import types
import collections
# Base node
class SourceElement(object):
'''
A SourceElement is the base class for all elements that occur in a Java
file parsed by plyj.
'''
def __init__(self):
super(SourceElement, self).__init__()
self._fields = []
def __repr__(self):
equals = ("{0}={1!r}".format(k, getattr(self, k))
for k in self._fields)
args = ", ".join(equals)
return "{0}({1})".format(self.__class__.__name__, args)
def __eq__(self, other):
try:
return self.__dict__ == other.__dict__
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def toJson(self):
def _toJson(o):
if hasattr(o, 'toJson'):
return o.toJson()
else:
if isinstance(o, collections.Iterable) and not isinstance(o, types.StringTypes):
return [ _toJson(element) for element in o ]
else:
return o
fields = dict((k, _toJson(getattr(self, k))) for k in self._fields)
return { 'p_type' : self.__class__.__name__,
'p_fields' : fields }
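    # e.g. Literal(value='1').toJson() (Literal is defined further below)
    # produces {'p_type': 'Literal', 'p_fields': {'value': '1'}}.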
def accept(self, visitor):
"""
        Default implementation that visits the subnodes in the order
        they are stored in self._fields.
"""
class_name = self.__class__.__name__
visit = getattr(visitor, 'visit_' + class_name)
if visit(self):
for f in self._fields:
field = getattr(self, f)
if field:
if isinstance(field, list):
for elem in field:
if isinstance(elem, SourceElement):
elem.accept(visitor)
elif isinstance(field, SourceElement):
field.accept(visitor)
getattr(visitor, 'leave_' + class_name)(self)
class CompilationUnit(SourceElement):
def __init__(self, package_declaration=None, import_declarations=None,
type_declarations=None):
super(CompilationUnit, self).__init__()
self._fields = [
'package_declaration', 'import_declarations', 'type_declarations']
if import_declarations is None:
import_declarations = []
if type_declarations is None:
type_declarations = []
self.package_declaration = package_declaration
self.import_declarations = import_declarations
self.type_declarations = type_declarations
class PackageDeclaration(SourceElement):
def __init__(self, name, modifiers=None):
super(PackageDeclaration, self).__init__()
self._fields = ['name', 'modifiers']
if modifiers is None:
modifiers = []
self.name = name
self.modifiers = modifiers
class ImportDeclaration(SourceElement):
def __init__(self, name, static=False, on_demand=False):
super(ImportDeclaration, self).__init__()
self._fields = ['name', 'static', 'on_demand']
self.name = name
self.static = static
self.on_demand = on_demand
class ClassDeclaration(SourceElement):
def __init__(self, name, body, modifiers=None, type_parameters=None,
extends=None, implements=None):
super(ClassDeclaration, self).__init__()
self._fields = ['name', 'body', 'modifiers',
'type_parameters', 'extends', 'implements']
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if implements is None:
implements = []
self.name = name
self.body = body
self.modifiers = modifiers
self.type_parameters = type_parameters
self.extends = extends
self.implements = implements
class ClassInitializer(SourceElement):
def __init__(self, block, static=False):
super(ClassInitializer, self).__init__()
self._fields = ['block', 'static']
self.block = block
self.static = static
class ConstructorDeclaration(SourceElement):
def __init__(self, name, block, modifiers=None, type_parameters=None,
parameters=None, throws=None):
super(ConstructorDeclaration, self).__init__()
self._fields = ['name', 'block', 'modifiers',
'type_parameters', 'parameters', 'throws']
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if parameters is None:
parameters = []
self.name = name
self.block = block
self.modifiers = modifiers
self.type_parameters = type_parameters
self.parameters = parameters
self.throws = throws
class EmptyDeclaration(SourceElement):
pass
class FieldDeclaration(SourceElement):
def __init__(self, type, variable_declarators, modifiers=None):
super(FieldDeclaration, self).__init__()
self._fields = ['type', 'variable_declarators', 'modifiers']
if modifiers is None:
modifiers = []
self.type = type
self.variable_declarators = variable_declarators
self.modifiers = modifiers
class MethodDeclaration(SourceElement):
def __init__(self, name, modifiers=None, type_parameters=None,
parameters=None, return_type='void', body=None, abstract=False,
extended_dims=0, throws=None, lineno=None):
super(MethodDeclaration, self).__init__()
self._fields = ['name', 'modifiers', 'type_parameters', 'parameters',
'return_type', 'body', 'abstract', 'extended_dims',
'throws', 'lineno']
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if parameters is None:
parameters = []
self.name = name
self.modifiers = modifiers
self.type_parameters = type_parameters
self.parameters = parameters
self.return_type = return_type
self.body = body
self.abstract = abstract
self.extended_dims = extended_dims
self.throws = throws
self.lineno = lineno
class FormalParameter(SourceElement):
def __init__(self, variable, type, modifiers=None, vararg=False):
super(FormalParameter, self).__init__()
self._fields = ['variable', 'type', 'modifiers', 'vararg']
if modifiers is None:
modifiers = []
self.variable = variable
self.type = type
self.modifiers = modifiers
self.vararg = vararg
class Variable(SourceElement):
    # I would like to remove this class. In theory, the dimensions could be
    # folded into the type, but that would require changing how variable
    # declarations are modelled. Consider 'int i, j[];': currently there is a
    # single type with two variable declarators, which closely resembles the
    # source code. If Variable were to go away, the type would have to be
    # duplicated for every variable...
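    # e.g. 'int i, j[];' is currently modelled as one 'int' type with the two
    # declarators Variable('i', dimensions=0) and Variable('j', dimensions=1).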
def __init__(self, name, dimensions=0):
super(Variable, self).__init__()
self._fields = ['name', 'dimensions']
self.name = name
self.dimensions = dimensions
class VariableDeclarator(SourceElement):
def __init__(self, variable, initializer=None):
super(VariableDeclarator, self).__init__()
self._fields = ['variable', 'initializer']
self.variable = variable
self.initializer = initializer
class Throws(SourceElement):
def __init__(self, types):
super(Throws, self).__init__()
self._fields = ['types']
self.types = types
class InterfaceDeclaration(SourceElement):
def __init__(self, name, modifiers=None, extends=None, type_parameters=None,
body=None):
super(InterfaceDeclaration, self).__init__()
self._fields = [
'name', 'modifiers', 'extends', 'type_parameters', 'body']
if modifiers is None:
modifiers = []
if extends is None:
extends = []
if type_parameters is None:
type_parameters = []
if body is None:
body = []
self.name = name
self.modifiers = modifiers
self.extends = extends
self.type_parameters = type_parameters
self.body = body
class EnumDeclaration(SourceElement):
def __init__(self, name, implements=None, modifiers=None,
type_parameters=None, body=None):
super(EnumDeclaration, self).__init__()
self._fields = [
'name', 'implements', 'modifiers', 'type_parameters', 'body']
if implements is None:
implements = []
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if body is None:
body = []
self.name = name
self.implements = implements
self.modifiers = modifiers
self.type_parameters = type_parameters
self.body = body
class EnumConstant(SourceElement):
def __init__(self, name, arguments=None, modifiers=None, body=None):
super(EnumConstant, self).__init__()
self._fields = ['name', 'arguments', 'modifiers', 'body']
if arguments is None:
arguments = []
if modifiers is None:
modifiers = []
if body is None:
body = []
self.name = name
self.arguments = arguments
self.modifiers = modifiers
self.body = body
class AnnotationDeclaration(SourceElement):
def __init__(self, name, modifiers=None, type_parameters=None, extends=None,
implements=None, body=None):
super(AnnotationDeclaration, self).__init__()
self._fields = [
'name', 'modifiers', 'type_parameters', 'extends', 'implements',
'body']
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if implements is None:
implements = []
if body is None:
body = []
self.name = name
self.modifiers = modifiers
self.type_parameters = type_parameters
self.extends = extends
self.implements = implements
self.body = body
class AnnotationMethodDeclaration(SourceElement):
def __init__(self, name, type, parameters=None, default=None,
modifiers=None, type_parameters=None, extended_dims=0):
super(AnnotationMethodDeclaration, self).__init__()
self._fields = ['name', 'type', 'parameters', 'default',
'modifiers', 'type_parameters', 'extended_dims']
if parameters is None:
parameters = []
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
self.name = name
self.type = type
self.parameters = parameters
self.default = default
self.modifiers = modifiers
self.type_parameters = type_parameters
self.extended_dims = extended_dims
class Annotation(SourceElement):
def __init__(self, name, members=None, single_member=None):
super(Annotation, self).__init__()
self._fields = ['name', 'members', 'single_member']
if members is None:
members = []
self.name = name
self.members = members
self.single_member = single_member
class AnnotationMember(SourceElement):
def __init__(self, name, value):
        super(AnnotationMember, self).__init__()
self._fields = ['name', 'value']
self.name = name
self.value = value
class Type(SourceElement):
def __init__(self, name, type_arguments=None, enclosed_in=None,
dimensions=0):
super(Type, self).__init__()
self._fields = ['name', 'type_arguments', 'enclosed_in', 'dimensions']
if type_arguments is None:
type_arguments = []
self.name = name
self.type_arguments = type_arguments
self.enclosed_in = enclosed_in
self.dimensions = dimensions
class Wildcard(SourceElement):
def __init__(self, bounds=None):
super(Wildcard, self).__init__()
self._fields = ['bounds']
if bounds is None:
bounds = []
self.bounds = bounds
class WildcardBound(SourceElement):
def __init__(self, type, extends=False, _super=False):
super(WildcardBound, self).__init__()
self._fields = ['type', 'extends', '_super']
self.type = type
self.extends = extends
self._super = _super
class TypeParameter(SourceElement):
def __init__(self, name, extends=None):
super(TypeParameter, self).__init__()
self._fields = ['name', 'extends']
if extends is None:
extends = []
self.name = name
self.extends = extends
class Expression(SourceElement):
def __init__(self):
super(Expression, self).__init__()
self._fields = []
class BinaryExpression(Expression):
def __init__(self, operator, lhs, rhs):
super(BinaryExpression, self).__init__()
self._fields = ['operator', 'lhs', 'rhs']
self.operator = operator
self.lhs = lhs
self.rhs = rhs
class Assignment(BinaryExpression):
pass
class Conditional(Expression):
def __init__(self, predicate, if_true, if_false):
        super(Conditional, self).__init__()
self._fields = ['predicate', 'if_true', 'if_false']
self.predicate = predicate
self.if_true = if_true
self.if_false = if_false
class ConditionalOr(BinaryExpression):
pass
class ConditionalAnd(BinaryExpression):
pass
class Or(BinaryExpression):
pass
class Xor(BinaryExpression):
pass
class And(BinaryExpression):
pass
class Equality(BinaryExpression):
pass
class InstanceOf(BinaryExpression):
pass
class Relational(BinaryExpression):
pass
class Shift(BinaryExpression):
pass
class Additive(BinaryExpression):
pass
class Multiplicative(BinaryExpression):
pass
class Unary(Expression):
def __init__(self, sign, expression):
super(Unary, self).__init__()
self._fields = ['sign', 'expression']
self.sign = sign
self.expression = expression
class Cast(Expression):
def __init__(self, target, expression):
super(Cast, self).__init__()
self._fields = ['target', 'expression']
self.target = target
self.expression = expression
class Statement(SourceElement):
pass
class Empty(Statement):
pass
class Block(Statement):
def __init__(self, statements=None):
        super(Block, self).__init__()
self._fields = ['statements']
if statements is None:
statements = []
self.statements = statements
def __iter__(self):
for s in self.statements:
yield s
class VariableDeclaration(Statement, FieldDeclaration):
pass
class ArrayInitializer(SourceElement):
def __init__(self, elements=None):
super(ArrayInitializer, self).__init__()
self._fields = ['elements']
if elements is None:
elements = []
self.elements = elements
class MethodInvocation(Expression):
def __init__(self, name, arguments=None, type_arguments=None, target=None):
super(MethodInvocation, self).__init__()
self._fields = ['name', 'arguments', 'type_arguments', 'target']
if arguments is None:
arguments = []
if type_arguments is None:
type_arguments = []
self.name = name
self.arguments = arguments
self.type_arguments = type_arguments
self.target = target
class IfThenElse(Statement):
def __init__(self, predicate, if_true=None, if_false=None):
super(IfThenElse, self).__init__()
self._fields = ['predicate', 'if_true', 'if_false']
self.predicate = predicate
self.if_true = if_true
self.if_false = if_false
class While(Statement):
def __init__(self, predicate, body=None):
super(While, self).__init__()
self._fields = ['predicate', 'body']
self.predicate = predicate
self.body = body
class For(Statement):
def __init__(self, init, predicate, update, body):
super(For, self).__init__()
self._fields = ['init', 'predicate', 'update', 'body']
self.init = init
self.predicate = predicate
self.update = update
self.body = body
class ForEach(Statement):
def __init__(self, type, variable, iterable, body, modifiers=None):
super(ForEach, self).__init__()
self._fields = ['type', 'variable', 'iterable', 'body', 'modifiers']
if modifiers is None:
modifiers = []
self.type = type
self.variable = variable
self.iterable = iterable
self.body = body
self.modifiers = modifiers
class Assert(Statement):
def __init__(self, predicate, message=None):
super(Assert, self).__init__()
self._fields = ['predicate', 'message']
self.predicate = predicate
self.message = message
class Switch(Statement):
def __init__(self, expression, switch_cases):
super(Switch, self).__init__()
self._fields = ['expression', 'switch_cases']
self.expression = expression
self.switch_cases = switch_cases
class SwitchCase(SourceElement):
def __init__(self, cases, body=None):
super(SwitchCase, self).__init__()
self._fields = ['cases', 'body']
if body is None:
body = []
self.cases = cases
self.body = body
class DoWhile(Statement):
def __init__(self, predicate, body=None):
super(DoWhile, self).__init__()
self._fields = ['predicate', 'body']
self.predicate = predicate
self.body = body
class Continue(Statement):
def __init__(self, label=None):
super(Continue, self).__init__()
self._fields = ['label']
self.label = label
class Break(Statement):
def __init__(self, label=None):
super(Break, self).__init__()
self._fields = ['label']
self.label = label
class Return(Statement):
def __init__(self, result=None):
super(Return, self).__init__()
self._fields = ['result']
self.result = result
class Synchronized(Statement):
def __init__(self, monitor, body):
super(Synchronized, self).__init__()
self._fields = ['monitor', 'body']
self.monitor = monitor
self.body = body
class Throw(Statement):
def __init__(self, exception):
super(Throw, self).__init__()
self._fields = ['exception']
self.exception = exception
class Try(Statement):
def __init__(self, block, catches=None, _finally=None, resources=None):
super(Try, self).__init__()
self._fields = ['block', 'catches', '_finally', 'resources']
if catches is None:
catches = []
if resources is None:
resources = []
self.block = block
self.catches = catches
self._finally = _finally
self.resources = resources
def accept(self, visitor):
if visitor.visit_Try(self):
for s in self.block:
s.accept(visitor)
for c in self.catches:
visitor.visit_Catch(c)
if self._finally:
self._finally.accept(visitor)
class Catch(SourceElement):
def __init__(self, variable, modifiers=None, types=None, block=None):
super(Catch, self).__init__()
self._fields = ['variable', 'modifiers', 'types', 'block']
if modifiers is None:
modifiers = []
if types is None:
types = []
self.variable = variable
self.modifiers = modifiers
self.types = types
self.block = block
class Resource(SourceElement):
def __init__(self, variable, type=None, modifiers=None, initializer=None):
super(Resource, self).__init__()
self._fields = ['variable', 'type', 'modifiers', 'initializer']
if modifiers is None:
modifiers = []
self.variable = variable
self.type = type
self.modifiers = modifiers
self.initializer = initializer
class ConstructorInvocation(Statement):
"""An explicit invocations of a class's constructor.
This is a variant of either this() or super(), NOT a "new" expression.
"""
def __init__(self, name, target=None, type_arguments=None, arguments=None):
super(ConstructorInvocation, self).__init__()
self._fields = ['name', 'target', 'type_arguments', 'arguments']
if type_arguments is None:
type_arguments = []
if arguments is None:
arguments = []
self.name = name
self.target = target
self.type_arguments = type_arguments
self.arguments = arguments
class InstanceCreation(Expression):
def __init__(self, type, type_arguments=None, arguments=None, body=None,
enclosed_in=None):
super(InstanceCreation, self).__init__()
self._fields = [
'type', 'type_arguments', 'arguments', 'body', 'enclosed_in']
if type_arguments is None:
type_arguments = []
if arguments is None:
arguments = []
if body is None:
body = []
self.type = type
self.type_arguments = type_arguments
self.arguments = arguments
self.body = body
self.enclosed_in = enclosed_in
class FieldAccess(Expression):
def __init__(self, name, target):
super(FieldAccess, self).__init__()
self._fields = ['name', 'target']
self.name = name
self.target = target
class ArrayAccess(Expression):
def __init__(self, index, target):
super(ArrayAccess, self).__init__()
self._fields = ['index', 'target']
self.index = index
self.target = target
class ArrayCreation(Expression):
def __init__(self, type, dimensions=None, initializer=None):
super(ArrayCreation, self).__init__()
self._fields = ['type', 'dimensions', 'initializer']
if dimensions is None:
dimensions = []
self.type = type
self.dimensions = dimensions
self.initializer = initializer
class Literal(SourceElement):
def __init__(self, value):
super(Literal, self).__init__()
self._fields = ['value']
self.value = value
class ClassLiteral(SourceElement):
def __init__(self, type):
super(ClassLiteral, self).__init__()
self._fields = ['type']
self.type = type
class Name(SourceElement):
def __init__(self, value):
super(Name, self).__init__()
self._fields = ['value']
self.value = value
def append_name(self, name):
try:
self.value = self.value + '.' + name.value
        except AttributeError:  # 'name' may be a plain string
self.value = self.value + '.' + name
class ExpressionStatement(Statement):
def __init__(self, expression):
super(ExpressionStatement, self).__init__()
self._fields = ['expression']
self.expression = expression
class Visitor(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __getattr__(self, name):
if not (name.startswith('visit_') or name.startswith('leave_')):
raise AttributeError('name must start with visit_ or leave_ but was {}'
.format(name))
def f(element):
if self.verbose:
msg = 'unimplemented call to {}; ignoring ({})'
print(msg.format(name, element))
return True
return f
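# Usage sketch (illustrative; MethodCollector is hypothetical, not part of
# this module): a Visitor subclass implements only the hooks it cares about,
# and every other visit_*/leave_* lookup falls through to the dynamic default
# returned by __getattr__ above.
#
#   class MethodCollector(Visitor):
#       def __init__(self):
#           super(MethodCollector, self).__init__()
#           self.names = []
#
#       def visit_MethodDeclaration(self, element):
#           self.names.append(element.name)
#           return True  # keep descending into the method body
#
#   tree.accept(MethodCollector())  # 'tree' is a parsed CompilationUnit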
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry for layers and their parameters/variables.
This represents the collection of all layers in the approximate Fisher
information matrix to which a particular FisherBlock may belong. That is, we
might have several layer collections for one TF graph (if we have multiple K-FAC
optimizers being used, for example.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import OrderedDict
import six
from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb
from tensorflow.contrib.kfac.python.ops import loss_functions as lf
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# Names for various approximations that can be requested for Fisher blocks.
APPROX_KRONECKER_NAME = "kron"
APPROX_DIAGONAL_NAME = "diagonal"
APPROX_FULL_NAME = "full"
# Possible value for 'reuse' keyword argument. Sets 'reuse' to
# tf.get_variable_scope().reuse.
VARIABLE_SCOPE = "VARIABLE_SCOPE"
# TODO(jamesmartens): need to add find_canonical_output back into this somewhere
class LayerParametersDict(OrderedDict):
"""An OrderedDict where keys are Tensors or tuples of Tensors.
Ensures that no Tensor is associated with two different keys.
"""
def __init__(self, *args, **kwargs):
self._tensors = set()
super(LayerParametersDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
key = self._canonicalize_key(key)
tensors = key if isinstance(key, (tuple, list)) else (key,)
key_collisions = self._tensors.intersection(tensors)
if key_collisions:
raise ValueError("Key(s) already present: {}".format(key_collisions))
self._tensors.update(tensors)
super(LayerParametersDict, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._canonicalize_key(key)
self._tensors.remove(key)
super(LayerParametersDict, self).__delitem__(key)
def __getitem__(self, key):
key = self._canonicalize_key(key)
return super(LayerParametersDict, self).__getitem__(key)
def __contains__(self, key):
key = self._canonicalize_key(key)
return super(LayerParametersDict, self).__contains__(key)
def _canonicalize_key(self, key):
if isinstance(key, (list, tuple)):
return tuple(key)
return key
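# Example (sketch): the dict canonicalizes list keys to tuples and refuses to
# let one Tensor appear under two different keys. 'w' and 'b' below stand in
# for real Tensors:
#
#   d = LayerParametersDict()
#   d[(w, b)] = "block for both parameters"
#   d[w] = "another block"   # raises ValueError: Key(s) already present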
# TODO(b/68034464): add capability for LayerCollection to be "finalized"
# and do this when it gets used by FisherEstimator / KfacOptimizer.
class LayerCollection(object):
"""Registry of information about layers and losses.
Note that you need to create a new one of these for each MatrixEstimator or
KfacOptimizer.
Attributes:
    fisher_blocks: a LayerParametersDict (subclass of OrderedDict) mapping layer
parameters (Tensors or tuples of Tensors) to FisherBlock instances.
fisher_factors: an OrderedDict mapping tuples to FisherFactor instances.
generic_registrations: a list of variables registered via a generic layer
registration. Generic registrations handle any and all of the ways a
variable is used in the graph, which means we don't need to check
their registration when verifying the correctness of the graph.
losses: a list of LossFunction objects. The loss to be optimized is their
sum.
"""
def __init__(self, graph=None, name="LayerCollection"):
self.fisher_blocks = LayerParametersDict()
self.fisher_factors = OrderedDict()
self._generic_registrations = set()
self._graph = graph or ops.get_default_graph()
self._loss_dict = {} # {str: LossFunction}
self._subgraph = None
with variable_scope.variable_scope(None, default_name=name) as scope:
self._var_scope = scope.name
@property
def losses(self):
"""LossFunctions registered with this LayerCollection."""
return list(self._loss_dict.values())
def register_block(self, layer_key, fisher_block):
"""Validates and registers the layer_key associated with the fisher_block.
Validation consists of checking whether the key was already registered or
if any of the elements of layer_key (if it's a tuple) were already
registered as part of another tuple (throws an error if so). If any of the
elements were registered by themselves, or as part of tuples that are
subsets of this layer_key, those registrations are first removed.
If the layer_key is a subset of an existing registration, registration of
the new, smaller layer_key is skipped.
e.g. If registrations include {'a': foo, ('b', 'c'): bar}, then
- register_layer('a', baz) -> ValueError
- register_layer(('b', 'c', 'd'), baz) ->
{'a': foo, ('b', 'c', 'd'): baz}
- register_layer('b', baz) ->
{'a': foo, ('b', 'c'): bar} (No change)
- register_layer(('a', 'd'), baz) ->
{('a', 'd'): baz, ('b', 'c'): bar}
- register_layer(('b', 'd'), baz) -> ValueError
Args:
layer_key: The key to check for in existing registrations and to register
if valid.
fisher_block: The associated fisher block.
Raises:
ValueError: If the layer_key was already registered, or if a subset of the
layer_key has already been registered as part of a different tuple.
"""
if layer_key in self.fisher_blocks:
raise ValueError("Duplicate registration: {}".format(layer_key))
if isinstance(layer_key, (tuple, list)):
self._register_block_with_sequence_key(layer_key, fisher_block)
else:
self._register_block_with_nonsequence_key(layer_key, fisher_block)
def _register_block_with_sequence_key(self, layer_key, fisher_block):
"""Validates and registers the layer_key if it's a sequence."""
inclusions = {
fisher_elt
for layer_elt in layer_key for fisher_elt in self.fisher_blocks
if self._equal_or_subset(layer_elt, fisher_elt)
}
if not inclusions:
self.fisher_blocks[layer_key] = fisher_block
return
for key in inclusions:
fisher_block_key = key if isinstance(key, (tuple, list)) else (key,)
if set(layer_key).issubset(fisher_block_key):
logging.warning("Graph Registration Warning: tried to register "
"a subset ({}) of an already registered tuple "
"({}), skipping".format(layer_key, fisher_block_key))
return
if not set(fisher_block_key).issubset(layer_key):
raise ValueError(
"Inconsistent registration, expected new key to be a subset or "
"superset of the existing key: existing is {}, new is {}".format(
key, layer_key))
else:
self.fisher_blocks.pop(key)
self.fisher_blocks[layer_key] = fisher_block
def _register_block_with_nonsequence_key(self, layer_key, fisher_block):
"""Validates and registers the layer_key if it's not a sequence."""
inclusions = {
fisher_elt
for fisher_elt in self.fisher_blocks
if self._equal_or_subset(layer_key, fisher_elt)
}
if not inclusions:
self.fisher_blocks[layer_key] = fisher_block
else:
logging.warning("Graph Registration Warning: tried to register "
"variable ({}) but a containing tuple was already "
"registered ({}), skipping".format(layer_key, inclusions))
def _equal_or_subset(self, elt1, elt2):
"""Checks if the elements are equal or one is contained in the other."""
return (elt1 == elt2 or (isinstance(elt1,
(tuple, list)) and elt2 in elt1) or
(isinstance(elt2, (tuple, list)) and elt1 in elt2))
def get_use_count_map(self):
"""Returns a dict of variables to their number of registrations."""
vars_to_uses = defaultdict(int)
for key, block in six.iteritems(self.fisher_blocks):
key = key if isinstance(key, (tuple, list)) else (key,)
for k in key:
vars_to_uses[k] += block.num_registered_minibatches
return vars_to_uses
def get_blocks(self):
return self.fisher_blocks.values()
def get_factors(self):
return self.fisher_factors.values()
@property
def generic_registrations(self):
return self._generic_registrations
@property
def graph(self):
return self._graph
@property
def subgraph(self):
return self._subgraph
def create_subgraph(self):
if not self.losses:
raise ValueError("Must have at least one registered loss.")
inputs_to_losses = nest.flatten(tuple(loss.inputs for loss in self.losses))
self._subgraph = utils.SubGraph(inputs_to_losses)
def total_loss(self):
return math_ops.add_n(tuple(loss.evaluate() for loss in self.losses))
def total_sampled_loss(self):
return math_ops.add_n(
tuple(loss.evaluate_on_sample() for loss in self.losses))
def register_fully_connected(self,
params,
inputs,
outputs,
approx=APPROX_KRONECKER_NAME,
reuse=VARIABLE_SCOPE):
"""Registers a fully connnected layer.
Args:
params: Tensor or 2-tuple of Tensors corresponding to weight and bias of
this layer. Weight matrix should have shape [input_size, output_size].
Bias should have shape [output_size].
inputs: Tensor of shape [batch_size, input_size]. Inputs to layer.
outputs: Tensor of shape [batch_size, output_size]. Preactivations
produced by layer.
approx: str. One of APPROX_KRONECKER_NAME or APPROX_DIAGONAL_NAME.
reuse: bool or str. If True, reuse an existing FisherBlock. If False,
create a new FisherBlock. If VARIABLE_SCOPE, use
tf.get_variable_scope().reuse.
Raises:
ValueError: For improper value to 'approx'.
KeyError: If reuse == True but no FisherBlock found for 'params'.
ValueError: If reuse == True and FisherBlock found but of the wrong type.
"""
approx_to_block_types = {
APPROX_KRONECKER_NAME: fb.FullyConnectedKFACBasicFB,
APPROX_DIAGONAL_NAME: fb.FullyConnectedDiagonalFB,
}
if approx not in approx_to_block_types:
raise ValueError("Bad value {} for approx.".format(approx))
block_type = approx_to_block_types[approx]
has_bias = isinstance(params, (tuple, list))
if reuse == VARIABLE_SCOPE:
reuse = variable_scope.get_variable_scope().reuse
if reuse:
block = self.fisher_blocks.get(params, None)
if block is None:
raise KeyError(
"Reuse requested but no FisherBlock found for params {}.".format(
params))
if not isinstance(block, block_type):
raise ValueError(
"Requested block of type {} but block of type {} already exists "
"for params {}.".format(block_type, type(block), params))
else:
block = block_type(self, has_bias)
self.register_block(params, block)
block.register_additional_minibatch(inputs, outputs)
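  # Example (sketch): registering one dense layer, where 'weights', 'biases',
  # 'acts_in' and 'preacts_out' are placeholders for the layer's variables
  # and tensors:
  #
  #   lc = LayerCollection()
  #   lc.register_fully_connected((weights, biases), acts_in, preacts_out)
  #
  # A second call with reuse=True registers another minibatch on the same
  # FisherBlock rather than creating a new one.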
def register_conv2d(self, params, strides, padding, inputs, outputs,
approx=APPROX_KRONECKER_NAME):
if approx == APPROX_KRONECKER_NAME:
self.register_block(params,
fb.ConvKFCBasicFB(self, params, inputs, outputs,
strides, padding))
elif approx == APPROX_DIAGONAL_NAME:
block = fb.ConvDiagonalFB(self, params, strides, padding)
block.register_additional_minibatch(inputs, outputs)
self.register_block(params, block)
def register_generic(self, params, batch_size, approx=APPROX_DIAGONAL_NAME):
params = params if isinstance(params, (tuple, list)) else (params,)
self._generic_registrations |= set(params)
# Generic registrations do not need special registration rules because we do
# not care about multiple generic registrations. Add them to the
# fisher_block dictionary manually rather than going through the logic in
# self.register_block.
if approx == APPROX_FULL_NAME:
self.fisher_blocks[params] = fb.FullFB(self, params, batch_size)
elif approx == APPROX_DIAGONAL_NAME:
self.fisher_blocks[params] = fb.NaiveDiagonalFB(self, params, batch_size)
else:
raise ValueError("Bad value {} for approx.".format(approx))
def register_categorical_predictive_distribution(self,
logits,
seed=None,
targets=None,
name=None):
"""Registers a categorical predictive distribution.
Args:
logits: The logits of the distribution (i.e. its parameters).
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
"""
name = name or self._graph.unique_name(
"register_categorical_predictive_distribution")
if name in self._loss_dict:
raise NotImplementedError(
"Adding logits to an existing LossFunction not yet supported.")
loss = lf.CategoricalLogitsNegativeLogProbLoss(
logits, targets=targets, seed=seed)
self._loss_dict[name] = loss
def register_normal_predictive_distribution(self,
mean,
var=0.5,
seed=None,
targets=None,
name=None):
"""Registers a normal predictive distribution.
Args:
mean: The mean vector defining the distribution.
var: The variance (must be a scalar). Note that the default value of
0.5 corresponds to a standard squared error loss (target -
prediction)**2. If your squared error loss is of the form
0.5*(target - prediction)**2 you should use var=1.0. (Default: 0.5)
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
"""
name = name or self._graph.unique_name(
"register_normal_predictive_distribution")
if name in self._loss_dict:
raise NotImplementedError(
"Adding logits to an existing LossFunction not yet supported.")
loss = lf.NormalMeanNegativeLogProbLoss(
mean, var, targets=targets, seed=seed)
self._loss_dict[name] = loss
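  # Worked example (sketch) for the 'var' convention above: since
  # -log N(t; p, var) = (t - p)**2 / (2 * var) + const, the default var=0.5
  # reproduces a plain (target - prediction)**2 loss, while var=1.0
  # reproduces the 0.5 * (target - prediction)**2 form.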
def register_multi_bernoulli_predictive_distribution(self,
logits,
seed=None,
targets=None,
name=None):
"""Registers a multi-Bernoulli predictive distribution.
Args:
logits: The logits of the distribution (i.e. its parameters).
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
"""
name = name or self._graph.unique_name(
"register_multi_bernoulli_predictive_distribution")
if name in self._loss_dict:
raise NotImplementedError(
"Adding logits to an existing LossFunction not yet supported.")
loss = lf.MultiBernoulliNegativeLogProbLoss(
logits, targets=targets, seed=seed)
self._loss_dict[name] = loss
def make_or_get_factor(self, cls, args):
"""Insert 'cls(args)' into 'self.fisher_factors' if not already present.
Wraps constructor in 'tf.variable_scope()' to ensure variables constructed
in 'cls.__init__' are placed under this LayerCollection's scope.
Args:
cls: Class that implements FisherFactor.
args: Tuple of arguments to pass into 'cls's constructor. Must be
hashable.
Returns:
Instance of 'cls' found in self.fisher_factors.
"""
try:
hash(args)
except TypeError:
raise TypeError((
"Unable to use (cls, args) = ({}, {}) as a key in "
"LayerCollection.fisher_factors. The pair cannot be hashed."
).format(cls, args))
with variable_scope.variable_scope(self._var_scope):
return utils.setdefault(self.fisher_factors, (cls, args),
lambda: cls(*args))
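  # Example (sketch): a typical end-to-end registration flow, with
  # placeholder tensors standing in for a real model:
  #
  #   lc = LayerCollection()
  #   lc.register_fully_connected((weights, biases), acts_in, logits)
  #   lc.register_categorical_predictive_distribution(logits, targets=labels)
  #   lc.create_subgraph()
  #   loss = lc.total_loss()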
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pydot
from cairis.core.Borg import Borg
from cairis.core.ARM import *
from cairis.core.colourcodes import usabilityColourCode
from cairis.core.colourcodes import usabilityTextColourCode
from cairis.core.colourcodes import probabilityTextColourCode
from cairis.core.colourcodes import threatColourCode
from cairis.core.colourcodes import riskTextColourCode
from cairis.core.colourcodes import obstacleColourCode
class KaosModel:
def __init__(self,associations,envName,kaosModelType = 'goal',goalName = '', db_proxy=None, font_name=None, font_size=None):
self.theAssociations = associations
self.theEnvironmentName = envName
self.theGoalName = goalName
self.dbProxy = db_proxy
self.fontName = font_name
self.fontSize = font_size
b = Borg()
if db_proxy is None:
self.dbProxy = b.dbProxy
    if font_name is None:
      self.fontName = b.fontName
    if font_size is None:
      self.fontSize = b.fontSize
self.theGraph = pydot.Dot()
self.theKaosModel = kaosModelType
if (self.theKaosModel == 'task'):
self.theGraph.set_graph_defaults(rankdir='LR')
else:
self.theGraph.set_graph_defaults(rankdir='BT')
self.theGraphName = b.tmpDir + '/' + self.theKaosModel + '.dot'
def size(self):
return len(self.theAssociations)
def buildNode(self,dimName,objtName):
if ((self.theKaosModel == 'template_goal') and (dimName == 'goal')):
dimName = 'template_goal'
objtUrl = dimName + '#' + objtName
b = Borg()
actorFile = b.assetDir + '/modelActor.png'
attackerFile = b.assetDir + '/modelAttacker.png'
if ((dimName == 'goal') or (dimName == 'template_goal')):
self.theGraph.add_node(pydot.Node(objtName,shape='parallelogram',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'obstacle'):
obsId = self.dbProxy.getDimensionId(objtName,'obstacle')
envId = self.dbProxy.getDimensionId(self.theEnvironmentName,'environment')
obsProb,obsRationale = self.dbProxy.obstacleProbability(obsId,envId)
self.theGraph.add_node(pydot.Node(objtName,shape='polygon',margin=0,skew='-0.4',style='filled',pencolor='black',colorscheme='ylorrd9',fillcolor=obstacleColourCode(obsProb),fontname=self.fontName,fontsize=self.fontSize,fontcolor=probabilityTextColourCode(obsProb),URL=objtUrl))
elif (dimName == 'domainproperty'):
self.theGraph.add_node(pydot.Node(objtName,shape='house',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'requirement'):
self.theGraph.add_node(pydot.Node(objtName,shape='parallelogram',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'countermeasure'):
self.theGraph.add_node(pydot.Node(objtName,shape='hexagon',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif ((dimName == 'role') and (self.theKaosModel != 'task')):
self.theGraph.add_node(pydot.Node(objtName,shape='hexagon',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif ((dimName == 'role') and (self.theKaosModel == 'task')):
self.theGraph.add_node(pydot.Node(objtName,label='',xlabel=objtName,shapefile=actorFile,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl,peripheries='0'))
elif (dimName == 'usecase'):
self.theGraph.add_node(pydot.Node(objtName,shape='ellipse',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'task'):
objt = self.dbProxy.dimensionObject(objtName,'task')
if (objt.assumption() == True):
objtLabel = "<<Assumption>>" + objtName
else:
objtLabel = objtName
taskScore = self.dbProxy.taskUsabilityScore(objtName,self.theEnvironmentName)
self.theGraph.add_node(pydot.Node(objtName,label=objtLabel,shape='ellipse',margin=0,style='filled',color=usabilityColourCode(taskScore),fontname=self.fontName,fontsize=self.fontSize,fontcolor=usabilityTextColourCode(taskScore),URL=objtUrl))
elif (dimName == 'misusecase'):
      ellipseColour = 'black'
      highestScore = 0
      if (self.theKaosModel == 'task'):
try:
riskName = objtName[8:]
riskObjt = self.dbProxy.dimensionObject(riskName,'risk')
riskScores = self.dbProxy.riskScore(riskObjt.threat(),riskObjt.vulnerability(),self.theEnvironmentName,riskName)
highestScore = 0
for riskScore in riskScores:
currentScore = riskScore[2]
if (currentScore > highestScore):
highestScore = currentScore
ellipseColour = threatColourCode(highestScore)
except TypeError as ex:
          raise ARMException("Error processing risk " + riskName + " in task model: " + str(ex))
self.theGraph.add_node(pydot.Node(objtName,shape='ellipse',margin=0,style='filled',color=ellipseColour,fontcolor=riskTextColourCode(highestScore),fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'persona'):
objt = self.dbProxy.dimensionObject(objtName,'persona')
if (objt.assumption() == True):
objtLabel = "<<Assumption>>" + objtName
self.theGraph.add_node(pydot.Node(objtName,label='',xlabel=objtLabel,shapefile=actorFile,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl,peripheries='0'))
else:
self.theGraph.add_node(pydot.Node(objtName,label='',xlabel=objtName,shapefile=actorFile,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl,peripheries='0'))
elif (dimName == 'attacker'):
self.theGraph.add_node(pydot.Node(objtName,label='',xlabel=objtName,shapefile=attackerFile,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl,peripheries='0'))
elif (dimName == 'response'):
self.theGraph.add_node(pydot.Node(objtName,shape='note',margin=0,fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'asset'):
fontColour = 'black'
nodeColour = 'black'
if (self.theKaosModel == 'task'):
fontColour = 'blue'
nodeColour = 'blue'
self.theGraph.add_node(pydot.Node(objtName,shape='record',margin=0,fontname=self.fontName,fontsize=self.fontSize,fontcolor=fontColour,color=nodeColour,URL=objtUrl))
else:
raise UnknownNodeType(dimName)
def layout(self,renderer = ''):
if (renderer == ''):
if ((self.theKaosModel == 'goal') or (self.theKaosModel == 'template_goal') or (self.theKaosModel == 'obstacle')):
renderer = 'dot'
if (self.theKaosModel == 'responsibility'):
renderer = 'twopi'
elif (self.theKaosModel == 'task'):
renderer = 'dot'
self.theGraph.write_xdot(self.theGraphName,prog=renderer)
return open(self.theGraphName).read()
def buildGoalModel(self,isComponent=False):
b = Borg()
conflictFile = b.assetDir + '/modelConflict.png'
self.nodeNameSet = set([])
refNodes = set([])
# the Graph get_edge function doesn't appear to work, so we'll keep a set of edges ourselves.
edgeSet = set([])
for association in self.theAssociations:
goalName = association.goal()
associationType = association.type()
subGoalName = association.subGoal()
alternativeFlag = association.alternative()
goalDimName = association.goalDimension()
subGoalDimName = association.subGoalDimension()
goalEnv = association.environment()
if ((self.theGoalName != '' or isComponent == True) and goalName not in self.nodeNameSet):
self.buildNode(goalDimName,goalName)
if ((self.theGoalName != '' or isComponent == True) and subGoalName not in self.nodeNameSet):
self.buildNode(subGoalDimName,subGoalName)
if ((associationType == 'obstruct') or (associationType == 'resolve')):
if ((subGoalName,goalName) not in edgeSet):
goalEdge = pydot.Edge(subGoalName,goalName,dir='forward',arrowhead='veetee',weight='1')
self.theGraph.add_edge(goalEdge)
edgeSet.add((subGoalName,goalName))
elif (associationType == 'depender'):
if ((subGoalName,goalName) not in edgeSet):
goalEdge = pydot.Edge(goalName,subGoalName,dir='forward',arrowhead='curve',weight='1')
self.theGraph.add_edge(goalEdge)
edgeSet.add((goalName,subGoalName))
elif (associationType == 'dependee'):
if ((subGoalName,goalName) not in edgeSet):
goalEdge = pydot.Edge(goalName,subGoalName,dir='forward',arrowhead='curve',weight='1')
self.theGraph.add_edge(goalEdge)
edgeSet.add((goalName,subGoalName))
else:
refNodeName = goalName + '#' + associationType
# This is probably a good time to see if there is already another goalassociation in the graph for another environment
assocDirection = 'forward'
arrowHead = 'vee'
if ((subGoalName,refNodeName) not in edgeSet):
objtUrl = 'goalassociation#' + goalEnv + '/' + goalName + '/' + subGoalName
if (alternativeFlag == 1):
refNodeName = goalName + '#' + subGoalName + '#' + associationType
if (refNodeName not in refNodes):
if (subGoalDimName in ['task','usecase']):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='blue',label=' ',height='.2',width='.2'))
elif ((subGoalDimName == 'countermeasure') and (associationType == 'and')):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='blue',label=' ',height='.2',width='.2'))
elif (associationType == 'and'):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',label=' ',height='.2',width='.2'))
elif (associationType == 'or'):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='black',label=' ',height='.2',width='.2'))
elif (associationType == 'responsible'):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='red',label=' ',height='.2',width='.2'))
elif ((associationType == 'conflict') or (associationType == 'obstruct')):
b = Borg()
self.theGraph.add_node(pydot.Node(refNodeName,shapefile=conflictFile,margin=0,label='',height='.1',width='.1',peripheries='0'))
assocDirection = 'none'
arrowHead = 'none'
elif((goalDimName == 'requirement') and (subGoalDimName == 'usecase')):
self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',label=' ',height='.2',width='.2'))
goalEdge = pydot.Edge(refNodeName,goalName,dir=assocDirection,arrowhead=arrowHead,weight='1')
if ((refNodeName,goalName) not in edgeSet):
self.theGraph.add_edge(goalEdge)
edgeSet.add((refNodeName,goalName))
refNodes.add(refNodeName)
if ((subGoalName,refNodeName) not in edgeSet):
self.theGraph.add_edge(pydot.Edge(subGoalName,refNodeName,dir='none',weight='1',URL=objtUrl))
edgeSet.add((subGoalName,refNodeName))
else:
pass
def buildTaskModel(self):
self.nodeNameSet = set([])
edgeSet = set([])
fontSize = '7.5'
for association in self.theAssociations:
goalName = association.goal()
subGoalName = association.subGoal()
goalDimName = association.goalDimension()
subGoalDimName = association.subGoalDimension()
assocLabel = association.rationale()
fontColour = 'black'
edgeColour = 'black'
edgeStyle = 'solid'
assocDir = 'none'
arrowHead = 'none'
arrowTail = 'none'
assocType = association.type()
if (self.theGoalName != '' and goalName not in self.nodeNameSet):
self.buildNode(goalDimName,goalName)
self.nodeNameSet.add(goalName)
if (self.theGoalName != '' and subGoalName not in self.nodeNameSet):
self.buildNode(subGoalDimName,subGoalName)
self.nodeNameSet.add(subGoalName)
if (assocType in ('misusecasethreatasset_association','misusecasevulnerabilityasset_association','taskmisusecasethreat_association','taskmisusecasevulnerability_association')):
fontColour = 'red'
edgeColour = 'red'
assocDir = 'forward'
arrowHead = 'vee'
elif (assocType in ('misusecasethreatmitigation_association','misusecasevulnerabilitymitigation_association','taskmisusecasemitigation_association')):
fontColour = 'green'
edgeColour = 'green'
assocDir = 'forward'
arrowHead = 'vee'
elif (assocType == 'taskasset_association'):
fontColour = 'blue'
edgeColour = 'blue'
arrowTail = 'vee'
elif (assocType == 'rolepersona_association'):
arrowHead = 'empty'
assocDir = 'forward'
elif (assocType == 'roleattacker_association'):
arrowHead = 'empty'
assocDir = 'forward'
if (assocType in ('misusecasethreatasset_association','misusecasevulnerabilityasset_association','taskasset_association')):
arrowHead = 'none'
arrowTail = 'vee'
if (assocType == 'taskmisusecasemitigation_association'):
arrowHead = 'none'
arrowTail = 'vee'
if (assocType == 'usecasetask_association'):
arrowTail = 'vee'
edgeStyle = 'dashed'
objtUrl = goalDimName + '#' + subGoalDimName + '#' + assocType
if ((subGoalName,goalName,assocLabel) not in edgeSet):
if (assocLabel == ''):
assocLabel = ' '
self.theGraph.add_edge(pydot.Edge(subGoalName,goalName,style=edgeStyle,dir=assocDir,arrowhead=arrowHead,arrowtail=arrowTail,label=assocLabel,fontsize=fontSize,weight='1',fontcolor=fontColour,color=edgeColour,URL=objtUrl))
edgeSet.add((subGoalName,goalName,assocLabel))
def graph(self):
try:
elements = []
if (self.theKaosModel == 'goal' and self.theGoalName == ''):
elements = self.dbProxy.goalModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'obstacle' and self.theGoalName == ''):
elements = self.dbProxy.obstacleModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'responsibility' and self.theGoalName == ''):
elements = self.dbProxy.responsibilityModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'task' and self.theGoalName == ''):
elements = self.dbProxy.taskModelElements(self.theEnvironmentName)
for element in elements:
self.buildNode(element[0],element[1])
if ((self.theKaosModel == 'goal') or (self.theKaosModel == 'responsibility') or (self.theKaosModel == 'obstacle')):
self.buildGoalModel()
elif (self.theKaosModel == 'template_goal'):
self.buildGoalModel(True)
else:
self.buildTaskModel()
return self.layout()
except DatabaseProxyException as errTxt:
raise ARMException(errTxt)
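# Example (sketch): rendering a goal model for one environment. The goalModel
# query is assumed to return the goal associations for that environment, as
# elsewhere in CAIRIS:
#
#   b = Borg()
#   associations = b.dbProxy.goalModel('Default')
#   model = KaosModel(list(associations.values()), 'Default', 'goal')
#   xdot = model.graph()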
|
|
"""
This is a convenient container gathering all the main
search methods for the various database tables.
It is intended to be used e.g. as
> from src.utils import search
> match = search.objects(...)
Note that this is not intended to be a complete listing of all search
methods! You need to refer to the respective manager to get all
possible search methods. To get to the managers from your code, import
the database model and call its 'objects' property.
Also remember that all commands in this file return lists (also if
there is only one match) unless noted otherwise.
Example: To reach the search method 'get_object_with_player'
in src/objects/managers.py:
> from src.objects.models import ObjectDB
> match = ObjectDB.objects.get_object_with_player(...)
"""
# Import the manager methods to be wrapped
from django.contrib.contenttypes.models import ContentType
# limit symbol import from API
__all__ = ("search_object", "search_player", "search_script",
"search_message", "search_channel", "search_help_entry",
"search_object_tag", "search_script_tag", "search_player_tag",
"search_channel_tag")
# import objects this way to avoid circular import problems
ObjectDB = ContentType.objects.get(app_label="objects", model="objectdb").model_class()
PlayerDB = ContentType.objects.get(app_label="players", model="playerdb").model_class()
ScriptDB = ContentType.objects.get(app_label="scripts", model="scriptdb").model_class()
Msg = ContentType.objects.get(app_label="comms", model="msg").model_class()
Channel = ContentType.objects.get(app_label="comms", model="channeldb").model_class()
HelpEntry = ContentType.objects.get(app_label="help", model="helpentry").model_class()
Tag = ContentType.objects.get(app_label="typeclasses", model="tag").model_class()
#
# Search objects as a character
#
# NOTE: A more powerful wrapper of this method
# is reachable from within each command class
# by using self.caller.search()!
#
# def object_search(self, ostring=None,
# attribute_name=None,
# typeclass=None,
# candidates=None,
# exact=True):
#
# Search globally or in a list of candidates and return results.
# The result is always a list of Objects (or the empty list)
#
# Arguments:
#    ostring: (str) The string to compare names against. By default (if
#          attribute_name is not set), this will search object.key
#          and object.aliases in order. Can also be in the form #dbref,
#          which will, if exact=True, be matched against the primary key.
#    attribute_name: (str) Use this named ObjectAttribute to match ostring
#          against, instead of the defaults.
# typeclass (str or TypeClass): restrict matches to objects having
# this typeclass. This will help speed up global searches.
#    candidates (list of ObjectDBs): If supplied, search will only be
# performed among the candidates in this list. A common list
# of candidates is the contents of the current location.
# exact (bool): Match names/aliases exactly or partially. Partial
# matching matches the beginning of words in the names/aliases,
# using a matching routine to separate multiple matches in
# names with multiple components (so "bi sw" will match
# "Big sword"). Since this is more expensive than exact
#          matching, it is recommended to be used together with
#          the candidates keyword to limit the number of possibilities.
# This keyword has no meaning if attribute_name is set.
#
# Returns:
# A list of matching objects (or a list with one unique match)
# def object_search(self, ostring, caller=None,
# candidates=None,
# attribute_name=None):
#
search_object = ObjectDB.objects.object_search
search_objects = search_object
object_search = search_object
objects = search_objects
#
# Search for players
#
# def player_search(self, ostring):
# """
# Searches for a particular player by name or
# database id.
#
# ostring = a string or database id.
# """
search_player = PlayerDB.objects.player_search
search_players = search_player
player_search = search_player
players = search_players
#
# Searching for scripts
#
# def script_search(self, ostring, obj=None, only_timed=False):
# """
# Search for a particular script.
#
# ostring - search criterion - a script ID or key
# obj - limit search to scripts defined on this object
# only_timed - limit search only to scripts that run
# on a timer.
# """
search_script = ScriptDB.objects.script_search
search_scripts = search_script
script_search = search_script
scripts = search_scripts
#
# Searching for communication messages
#
#
# def message_search(self, sender=None, receiver=None, channel=None, freetext=None):
# """
# Search the message database for particular messages. At least one
# of the arguments must be given to do a search.
#
# sender - get messages sent by a particular player
# receiver - get messages received by a certain player
# channel - get messages sent to a particular channel
# freetext - Search for a text string in a message.
# NOTE: This can potentially be slow, so make sure to supply
# one of the other arguments to limit the search.
# """
search_message = Msg.objects.message_search
search_messages = search_message
message_search = search_message
messages = search_messages
#
# Search for Communication Channels
#
# def channel_search(self, ostring)
# """
# Search the channel database for a particular channel.
#
# ostring - the key or database id of the channel.
# """
search_channel = Channel.objects.channel_search
search_channels = search_channel
channel_search = search_channel
channels = search_channels
#
# Find help entry objects.
#
# def search_help(self, ostring, help_category=None):
# """
#     Retrieve a help entry object.
#
# ostring - the help topic to look for
#     help_category - limit the search to a particular help category
# """
search_help_entry = HelpEntry.objects.search_help
search_help_entries = search_help_entry
help_entry_search = search_help_entry
help_entries = search_help_entries
# Locate Attributes
# search_object_attribute(key, category, value, strvalue) (also search_attribute works)
# search_player_attribute(key, category, value, strvalue) (also search_attribute works)
# search_script_attribute(key, category, value, strvalue) (also search_attribute works)
# search_channel_attribute(key, category, value, strvalue) (also search_attribute works)
# Note that these return the object attached to the Attribute,
# not the attribute object itself (this is usually what you want)
def search_object_attribute(key=None, category=None, value=None, strvalue=None):
return ObjectDB.objects.get_by_attribute(key=key, category=category, value=value, strvalue=strvalue)
def search_player_attribute(key=None, category=None, value=None, strvalue=None):
return PlayerDB.objects.get_by_attribute(key=key, category=category, value=value, strvalue=strvalue)
def search_script_attribute(key=None, category=None, value=None, strvalue=None):
return ScriptDB.objects.get_by_attribute(key=key, category=category, value=value, strvalue=strvalue)
def search_channel_attribute(key=None, category=None, value=None, strvalue=None):
return Channel.objects.get_by_attribute(key=key, category=category, value=value, strvalue=strvalue)
# search for attribute objects
search_attribute_object = ObjectDB.objects.get_attribute
# Locate Tags
# search_object_tag(key=None, category=None) (also search_tag works)
# search_player_tag(key=None, category=None)
# search_script_tag(key=None, category=None)
# search_channel_tag(key=None, category=None)
# Note that this returns the object attached to the tag, not the tag
# object itself (this is usually what you want)
def search_object_tag(key=None, category=None):
return ObjectDB.objects.get_by_tag(key=key, category=category)
search_tag = search_object_tag # this is the most common case
def search_player_tag(key=None, category=None):
return PlayerDB.objects.get_by_tag(key=key, category=category)
def search_script_tag(key=None, category=None):
return ScriptDB.objects.get_by_tag(key=key, category=category)
def search_channel_tag(key=None, category=None):
return Channel.objects.get_by_tag(key=key, category=category)
# search for tag objects
search_tag_object = ObjectDB.objects.get_tag
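#
# Example usage (sketch; the key names below are illustrative):
#
# from src.utils import search
# matches = search.objects("red_button")          # list of matching ObjectDBs
# tagged = search.search_object_tag(key="quest")  # objects carrying the tag
#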
|
|
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import kinesis_backends
class KinesisResponse(BaseResponse):
@property
def parameters(self):
return json.loads(self.body)
@property
def kinesis_backend(self):
return kinesis_backends[self.region]
@property
def is_firehose(self):
host = self.headers.get('host') or self.headers['Host']
return host.startswith('firehose') or 'firehose' in self.headers.get('Authorization', '')
def create_stream(self):
stream_name = self.parameters.get('StreamName')
shard_count = self.parameters.get('ShardCount')
self.kinesis_backend.create_stream(
stream_name, shard_count, self.region)
return ""
def describe_stream(self):
stream_name = self.parameters.get('StreamName')
stream = self.kinesis_backend.describe_stream(stream_name)
return json.dumps(stream.to_json())
def list_streams(self):
streams = self.kinesis_backend.list_streams()
return json.dumps({
"HasMoreStreams": False,
"StreamNames": [stream.stream_name for stream in streams],
})
def delete_stream(self):
stream_name = self.parameters.get("StreamName")
self.kinesis_backend.delete_stream(stream_name)
return ""
def get_shard_iterator(self):
stream_name = self.parameters.get("StreamName")
shard_id = self.parameters.get("ShardId")
shard_iterator_type = self.parameters.get("ShardIteratorType")
starting_sequence_number = self.parameters.get(
"StartingSequenceNumber")
shard_iterator = self.kinesis_backend.get_shard_iterator(
stream_name, shard_id, shard_iterator_type, starting_sequence_number,
)
return json.dumps({
"ShardIterator": shard_iterator
})
def get_records(self):
shard_iterator = self.parameters.get("ShardIterator")
limit = self.parameters.get("Limit")
next_shard_iterator, records = self.kinesis_backend.get_records(
shard_iterator, limit)
return json.dumps({
"NextShardIterator": next_shard_iterator,
"Records": [record.to_json() for record in records]
})
def put_record(self):
if self.is_firehose:
return self.firehose_put_record()
stream_name = self.parameters.get("StreamName")
partition_key = self.parameters.get("PartitionKey")
explicit_hash_key = self.parameters.get("ExplicitHashKey")
sequence_number_for_ordering = self.parameters.get(
"SequenceNumberForOrdering")
data = self.parameters.get("Data")
sequence_number, shard_id = self.kinesis_backend.put_record(
stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return json.dumps({
"SequenceNumber": sequence_number,
"ShardId": shard_id,
})
def put_records(self):
if self.is_firehose:
return self.put_record_batch()
stream_name = self.parameters.get("StreamName")
records = self.parameters.get("Records")
response = self.kinesis_backend.put_records(
stream_name, records
)
return json.dumps(response)
def split_shard(self):
stream_name = self.parameters.get("StreamName")
shard_to_split = self.parameters.get("ShardToSplit")
new_starting_hash_key = self.parameters.get("NewStartingHashKey")
self.kinesis_backend.split_shard(
stream_name, shard_to_split, new_starting_hash_key
)
return ""
def merge_shards(self):
stream_name = self.parameters.get("StreamName")
shard_to_merge = self.parameters.get("ShardToMerge")
adjacent_shard_to_merge = self.parameters.get("AdjacentShardToMerge")
self.kinesis_backend.merge_shards(
stream_name, shard_to_merge, adjacent_shard_to_merge
)
return ""
''' Firehose '''
def create_delivery_stream(self):
stream_name = self.parameters['DeliveryStreamName']
redshift_config = self.parameters.get(
'RedshiftDestinationConfiguration')
if redshift_config:
redshift_s3_config = redshift_config['S3Configuration']
stream_kwargs = {
'redshift_username': redshift_config['Username'],
'redshift_password': redshift_config['Password'],
'redshift_jdbc_url': redshift_config['ClusterJDBCURL'],
'redshift_role_arn': redshift_config['RoleARN'],
'redshift_copy_command': redshift_config['CopyCommand'],
'redshift_s3_role_arn': redshift_s3_config['RoleARN'],
'redshift_s3_bucket_arn': redshift_s3_config['BucketARN'],
'redshift_s3_prefix': redshift_s3_config['Prefix'],
'redshift_s3_compression_format': redshift_s3_config.get('CompressionFormat'),
'redshift_s3_buffering_hings': redshift_s3_config['BufferingHints'],
}
else:
# S3 Config
s3_config = self.parameters['S3DestinationConfiguration']
stream_kwargs = {
's3_role_arn': s3_config['RoleARN'],
's3_bucket_arn': s3_config['BucketARN'],
's3_prefix': s3_config['Prefix'],
's3_compression_format': s3_config.get('CompressionFormat'),
's3_buffering_hings': s3_config['BufferingHints'],
}
stream = self.kinesis_backend.create_delivery_stream(
stream_name, **stream_kwargs)
return json.dumps({
'DeliveryStreamARN': stream.arn
})
def describe_delivery_stream(self):
stream_name = self.parameters["DeliveryStreamName"]
stream = self.kinesis_backend.get_delivery_stream(stream_name)
return json.dumps(stream.to_dict())
def list_delivery_streams(self):
streams = self.kinesis_backend.list_delivery_streams()
return json.dumps({
"DeliveryStreamNames": [
stream.name for stream in streams
],
"HasMoreDeliveryStreams": False
})
def delete_delivery_stream(self):
stream_name = self.parameters['DeliveryStreamName']
self.kinesis_backend.delete_delivery_stream(stream_name)
return json.dumps({})
def firehose_put_record(self):
stream_name = self.parameters['DeliveryStreamName']
record_data = self.parameters['Record']['Data']
record = self.kinesis_backend.put_firehose_record(
stream_name, record_data)
return json.dumps({
"RecordId": record.record_id,
})
def put_record_batch(self):
stream_name = self.parameters['DeliveryStreamName']
records = self.parameters['Records']
request_responses = []
for record in records:
record_response = self.kinesis_backend.put_firehose_record(
stream_name, record['Data'])
request_responses.append({
"RecordId": record_response.record_id
})
return json.dumps({
"FailedPutCount": 0,
"RequestResponses": request_responses,
})
def add_tags_to_stream(self):
stream_name = self.parameters.get('StreamName')
tags = self.parameters.get('Tags')
self.kinesis_backend.add_tags_to_stream(stream_name, tags)
return json.dumps({})
def list_tags_for_stream(self):
stream_name = self.parameters.get('StreamName')
exclusive_start_tag_key = self.parameters.get('ExclusiveStartTagKey')
limit = self.parameters.get('Limit')
response = self.kinesis_backend.list_tags_for_stream(
stream_name, exclusive_start_tag_key, limit)
return json.dumps(response)
def remove_tags_from_stream(self):
stream_name = self.parameters.get('StreamName')
tag_keys = self.parameters.get('TagKeys')
self.kinesis_backend.remove_tags_from_stream(stream_name, tag_keys)
return json.dumps({})
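# Example (sketch): exercising this response class through moto's mock,
# assuming the mock_kinesis decorator exported by this package:
#
#   import boto.kinesis
#   from moto import mock_kinesis
#
#   @mock_kinesis
#   def test_stream():
#       conn = boto.kinesis.connect_to_region("us-east-1")
#       conn.create_stream("my-stream", 1)
#       assert conn.describe_stream("my-stream")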
|
|
"""
Nginx Blueprint
===============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.nginx
settings:
nginx:
source_version: 1.4.6-1ubuntu3.3 # Required if installed from source (see modules)
sites: # List of sites/templates in `sites-available` folder to enable (Optional)
- foo # Template name, with or without .conf extension
- bar
# auto_disable_sites: true # Auto disable sites not specified in `sites` setting (Default: true)
# modules: # If present, nginx will be built and installed from source with these modules
# - rtmp
# - vod
"""
import os
from fabric.context_managers import cd
from fabric.contrib import files
from fabric.decorators import task
from fabric.utils import warn
from refabric.api import run, info
from refabric.context_managers import sudo, silent
from refabric.contrib import blueprints
from . import debian
__all__ = ['start', 'stop', 'restart', 'reload', 'setup', 'configure',
'enable', 'disable', 'tail']
blueprint = blueprints.get(__name__)
nginx_root = '/etc/nginx/'
sites_available_path = os.path.join(nginx_root, 'sites-available')
sites_enabled_path = os.path.join(nginx_root, 'sites-enabled')
start = debian.service_task('nginx', 'start')
stop = debian.service_task('nginx', 'stop')
restart = debian.service_task('nginx', 'restart')
reload = debian.service_task('nginx', 'reload')
@task
def setup():
"""
Install and configure nginx
"""
install()
configure()
restart()
def install():
if blueprint.get('modules'):
install_from_source()
else:
with sudo():
debian.apt_get('install', 'nginx', 'nginx-extras')
def install_from_source():
from blues import debian
with sudo():
debian.apt_get_update()
# Install dependencies
packages = ('build-essential', 'libpcre3', 'libpcre3-dev',
'libssl-dev', 'dpkg-dev', 'git', 'software-properties-common')
debian.apt_get('install', *packages)
# Setup nginx source
nginx_full_distro_version = blueprint.get('source_version')
if not nginx_full_distro_version:
            raise TypeError('You are installing nginx from source, please specify source_version')
nginx_version, nginx_distro_version = nginx_full_distro_version.split('-')
nginx_source_path = '/usr/src/nginx'
nginx_source_version_path = os.path.join(nginx_source_path, 'nginx-{}'.format(nginx_version))
nginx_source_module_path = os.path.join(nginx_source_version_path, 'debian/modules/')
debian.mkdir(nginx_source_path)
with cd(nginx_source_path):
debian.apt_get('source', 'nginx={}'.format(nginx_full_distro_version))
debian.apt_get('build-dep', '-y nginx={}'.format(nginx_full_distro_version))
# Get wanted nginx modules
nginx_modules = blueprint.get('modules')
if 'rtmp' in nginx_modules:
# Download nginx-rtmp module
nginx_rtmp_version = '1.1.7'
nginx_rtmp_module_path = os.path.join(nginx_source_module_path, 'nginx-rtmp-module')
nginx_rtmp_module_version_path = os.path.join(nginx_source_module_path,
'nginx-rtmp-module-{}'.format(nginx_rtmp_version))
archive_file = '{}.tar.gz'.format(nginx_rtmp_version)
run('wget -P /tmp/ https://github.com/arut/nginx-rtmp-module/archive/v{f}'.format(
f=archive_file))
# Unpackage to nginx source directory
run('tar xzf /tmp/v{f} -C {nginx_source_module_path}'.format(
f=archive_file, nginx_source_module_path=nginx_source_module_path))
# Set up nginx rtmp version symlink
debian.ln(nginx_rtmp_module_version_path, nginx_rtmp_module_path)
                # Configure nginx dpkg rules, TODO: Do not add module if present in rules
rtmp_module_string = '"s/^common_configure_flags := /common_configure_flags := \\\\\\\\\\\\\\\\\\n\\t\\t\\t--add-module=\\$\(MODULESDIR\)\/nginx-rtmp-module /g"'
run('sed -ri {} {}'.format(rtmp_module_string,
os.path.join(nginx_source_version_path, 'debian/rules')))
# Install useful tools, like ffmpeg
debian.add_apt_repository('ppa:mc3man/trusty-media', src=True)
debian.apt_get_update()
debian.apt_get('install', 'libfaac-dev', 'ffmpeg', 'zlib1g-dev', 'libjpeg8-dev')
if 'vod' in nginx_modules:
                # Download nginx-vod module
# nginx_vod_version = '2ac3bfeffab2fa1b46923236b7fd0ea15616a417' # "Latest" git commit
# nginx_vod_version = '88160cacd0d9789d84605425b78e3f494950529c' # Git commit pre mms playready
nginx_vod_version = 'master'
nginx_vod_module_path = os.path.join(nginx_source_module_path, 'nginx-vod-module')
nginx_vod_module_version_path = os.path.join(nginx_source_module_path,
'nginx-vod-module-{}'.format(nginx_vod_version))
archive_file = '{}.tar.gz'.format(nginx_vod_version)
debian.rm(nginx_vod_module_version_path, recursive=True)
run('wget -O /tmp/{f} https://github.com/5monkeys/nginx-vod-module/archive/{f}'.format(
f=archive_file))
# Unpackage to nginx source directory
run('tar xzf /tmp/{f} -C {nginx_source_module_path}'.format(
f=archive_file, nginx_source_module_path=nginx_source_module_path))
                # Set up nginx vod version symlink
debian.ln(nginx_vod_module_version_path, nginx_vod_module_path)
                # Configure nginx dpkg rules, TODO: Do not add module if present in rules
vod_module_string = '"s/^common_configure_flags := /common_configure_flags := \\\\\\\\\\\\\\\\\\n\\t\\t\\t--add-module=\\$\(MODULESDIR\)\/nginx-vod-module /g"'
run('sed -ri {} {}'.format(vod_module_string,
os.path.join(nginx_source_version_path, 'debian/rules')))
# Setup nginx
with cd(nginx_source_version_path):
run('dpkg-buildpackage -b')
with cd(nginx_source_path):
run('dpkg --install nginx-common_{nginx_full_distro_version}_all.deb nginx-extras_{nginx_full_distro_version}_amd64.deb'.format(
nginx_full_distro_version=nginx_full_distro_version))
@task
def configure():
"""
Configure nginx and enable/disable sites
"""
with sudo():
# Upload templates
context = {
'num_cores': debian.nproc(),
'ipv4_addresses': debian.get_ipv4_addresses(),
}
uploads = blueprint.upload('./', nginx_root, context)
        # Disable previously enabled sites that are no longer in the `sites` setting
changes = []
sites = blueprint.get('sites') or []
auto_disable_sites = blueprint.get('auto_disable_sites', True)
if auto_disable_sites:
with silent():
enabled_site_links = run('ls {}'.format(sites_enabled_path)).split()
for link in enabled_site_links:
link_name = os.path.splitext(link)[0] # Without extension
if link not in sites and link_name not in sites:
changed = disable(link, do_reload=False)
changes.append(changed)
### Enable sites from settings
for site in sites:
changed = enable(site, do_reload=False)
changes.append(changed)
### Reload nginx if new templates or any site has been enabled/disabled
if uploads or any(changes):
reload()
@task
def disable(site, do_reload=True):
"""
Disable site
:param site: Site to disable
:param do_reload: Reload nginx service
:return: Got disabled?
"""
disabled = False
site = site if site.endswith('.conf') or site == 'default' else '{}.conf'.format(site)
with sudo(), cd(sites_enabled_path):
if files.is_link(site):
info('Disabling site: {}', site)
with silent():
debian.rm(site)
disabled = True
if do_reload:
reload()
else:
warn('Invalid site: {}'.format(site))
return disabled
@task
def enable(site, do_reload=True):
"""
Enable site
:param site: Site to enable
:param do_reload: Reload nginx service
:return: Got enabled?
"""
enabled = False
if not (site.endswith('.conf') or site == 'default'):
site = '{}.conf'.format(site)
with sudo():
available_site = os.path.join(sites_available_path, site)
if not files.exists(available_site):
warn('Invalid site: {}'.format(site))
else:
with cd(sites_enabled_path):
if not files.exists(site):
info('Enabling site: {}', site)
with silent():
debian.ln(available_site, site)
enabled = True
if do_reload:
reload()
return enabled
@task
def tail(log_name='error'):
log_dir = '/var/log/nginx'
run('tail -f {}'.format(
os.path.join(log_dir,
'{}.log'.format(log_name))))
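# Example (sketch): typical invocations from the fabric command line, given
# this blueprint listed under `blueprints` in the fabric environment:
#
#   $ fab nginx.setup          # install, configure and restart nginx
#   $ fab nginx.enable:foo     # symlink sites-available/foo.conf
#   $ fab nginx.tail:access    # follow /var/log/nginx/access.log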
|
|
valid_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
numbers = '0123456789'
candidates = ['Donald Trump', 'Secretary Hillary Clinton',
'Governor Gary Johnson', 'Dr. Jill Stein']
def extract_mentions(text):
''' (str) -> list of str
Returns a list of strings representing the twitter usernames
mentioned in the tweet's text, in the order they appear and including
repeated mentions.
Precondition: len(text) < 140
>>> extract_mentions('@realDonaldTrump, @SenSanders, @HillaryClinton')
['realDonaldTrump', 'SenSanders', 'HillaryClinton']
>>> extract_mentions('@realDonaldTrump #MAGA #MAGA #MAGA')
['realDonaldTrump']
>>> extract_mentions('Voter recount? @SenSanders?')
['SenSanders']
>>> extract_mentions('@realDonaldTrump abcdefg @realDonaldTrump')
['realDonaldTrump', 'realDonaldTrump']
'''
    mentions = []
    tweet = text.split()
    for token in tweet:
        if token[0] == '@':
            candidate_text = token[1:]
            mention = ''
            for char in candidate_text:
                if char in valid_chars:
                    mention += char
                else:
                    break
            mentions.append(mention)
    return mentions
def extract_hashtags(text):
''' (str) -> list of str
Returns a list of strings representing the hashtags used in the
tweet's text, in the order they appear and excluding repeats.
Precondition: len(text) < 140
>>> extract_hashtags('#MAGA #MAGA #MAGA @realDonaldTrump 2016!!!')
['MAGA']
>>> extract_hashtags('#2016Election #Election2016 Go out and vote!')
['Election2016']
>>> extract_hashtags('this tweet is full of errors #abc@aa@#abcd^^^')
['abc', 'abcd']
'''
    hashtags = []
    tweet = text.split()
    # For entries in tweet which have multiple hashtag characters,
    # we parse through the entry, extracting all possible hashtags.
    tweet = helper_1(tweet)
    # For each entry in tweet, extract the hashtag text, stopping at the
    # first invalid character; a hashtag may not start with a number.
    for candidate in tweet:
        if candidate[0] == '#':
            candidate_text = candidate[1:]
            if candidate_text and candidate_text[0] not in numbers:
                hashtag = ''
                for char in candidate_text:
                    if char in valid_chars:
                        hashtag += char
                    else:
                        break
                if hashtag not in hashtags:
                    hashtags.append(hashtag)
    return hashtags
def helper_1(tweet):
''' (list of str) -> list of str
Returns a list of strings which are the former entries of the tweet list
    that have been formatted to split each occurrence of '#' in each entry.
>>> helper_1(['#abc@aa@#abcd^^^', '#abc'])
['#abc@aa@', '#abcd^^^', '#abc']
'''
    new_tweet = []
    for candidate in tweet:
        if candidate.count('#') > 1:
            pieces = candidate.replace('#', ' ').split()
            for piece in pieces:
                new_tweet.append('#' + piece)
        else:
            new_tweet.append(candidate)
    return new_tweet
def count_words(text, dictionary):
''' (str, dict of {str, int}) -> None
Modifies a dictionary of lowercase words as keys (which are words taken
from the tweet text, made lowercase and stripped of non-alphanumeric
    characters; URLs are ignored) and their respective number of occurrences
in the tweet text as their assigned values.
>>> t = "@utmandrew Don't you wish you could vote? #MakeAmericaGreatAgain"
>>> dict = {'you': 0, 'dont': 0, 'wish': 0, 'could': 0, 'vote': 0}
>>> count_words(t, dict)
>>> dict == {'vote': 1, 'could': 1, 'you': 2, 'dont': 1, 'wish': 1}
True
>>> t = "@realDonaldTrump CHECK this out! http://www.google.com"
>>> dict = {}
>>> count_words(t, dict)
>>> dict == {'this': 1, 'check': 1, 'out': 1}
True
'''
    illegal_chars = numbers + '_.,;{}[]+=-!$%^&*()`~?|\\/'
    tweet = text.split()
    tweet_words_final = []
    for token in tweet:
        # URLs are ignored rather than ending the scan.
        if token.startswith('http://'):
            continue
        candidate = token.replace('\'', '').strip(illegal_chars).lower()
        if candidate and not (candidate[0] == '#' or candidate[0] == '@'):
            tweet_words_final.append(candidate)
    for word in tweet_words_final:
        if word not in dictionary:
            dictionary[word] = 0
        dictionary[word] += 1
def common_words(dictionary, N):
''' (dict of {str: int}, int) -> None
    Modifies a dictionary of lowercase words (as in count_words) so that only
    the N most common words remain, for N a positive integer, as ranked by
    their counts. If keeping the words tied at the N-th position would yield
    a dictionary of length > N, all words sharing that count are omitted.
>>> dictionary = {'a': 3, 'b': 3, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7}
>>> common_words(dictionary, 5)
>>> dictionary == {'e': 5, 'd': 4, 'f': 6, 'g': 7}
True
>>> dictionary2 = {'a': 3, 'b': 3, 'c': 3, 'd': 3, 'e': 3, 'f': 3, 'g': 3}
>>> common_words(dictionary2, 5)
>>> dictionary2 == {}
True
'''
    if N < len(dictionary):
        new_dict = dict(dictionary)
        for i in range(N):
            key = max(new_dict, key=new_dict.get)
            value = new_dict[key]
            num_occurrences = helper_2(new_dict)[value]
            if (N - i) >= num_occurrences:
                del new_dict[key]
        # Whatever survives in new_dict did not make the top N; drop it
        # from the original dictionary.
        for key in new_dict:
            del dictionary[key]
def helper_2(dictionary):
''' (dict of {type1: type2}) -> (dict of type {type2: int})
Returns a dictionary where the keys are the values of the argument
    dictionary, and the values are their frequency (number of occurrences) in
the argument dictionary.
>>> dictionary = {'a': 1, 'b': 1, 'c': 2, 'd': 2, 'e': 3, 'f': 3, 'g': 3}
>>> helper_2(dictionary) == {1: 2, 2: 2, 3: 3}
True
'''
dict_count = {}
for value in dictionary.values():
if value not in dict_count:
dict_count[value] = 0
dict_count[value] += 1
return dict_count
def read_tweets(file):
''' (file open for reading) -> dict of {str: list of tweet tuples}
Returns a dictionary of candidates as keys and tweet information as values,
where the tweet information is in the form of tweet tuples where each tuple
represents data retrieved from a single tweet in the format:
(candidate, tweet text, date, source, # favorites, # retweets).
All info retrieved from a file of tweet data.
Precondition: for each tweet tuple, the values for date, # favorites
and # retweets should be of type int, while values for candidate and
tweet text should be of type str.
    (No doctest: the result depends on an open file object.)
'''
tweets = {}
line = file.readline()
while line != '':
if line.strip()[:-1] in candidates:
candidate = line.strip()[:-1]
tweets[candidate] = []
line = file.readline()
data = line.split(',')
        if all(data[i].strip().isdigit() for i in (0, 1, 4, 5)):
del data[0], data[1]
for i in [0, 2, 3]:
data[i] = int(data[i])
data.insert(0, candidate)
line = file.readline()
tweet_text = ''
while line != '<<<EOT\n':
tweet_text += line.strip() + ' '
line = file.readline()
data.insert(1, tweet_text)
info = ()
for i in range(len(data)):
if type(data[i]) == int:
info += data[i],
else:
info += data[i].strip(),
tweets[candidate].append(info)
line = file.readline()
return tweets
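# Example (sketch) of the file layout read_tweets() expects, inferred from
# the parsing above: a candidate header line ending in ':', then for each
# tweet a comma-separated data line (six fields, of which the first and
# third are discarded), the tweet text, and a closing '<<<EOT' line:
#
#   Donald Trump:
#   811234,1477500000,x,Twitter for Android,5251,1895
#   Thank you Ohio!
#   <<<EOT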
def most_popular(tweets, start, end):
''' (dict of {str: list of tweet tuples}, int, int) -> str
    Returns the name of the most popular candidate, counting only tweets
    posted between the dates start and end (inclusive). Popularity is the
    sum of favorites and retweets across each tweet a candidate wrote in
    that window. Returns 'Tie' if two or more candidates share the top score.
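    Illustrative doctest (synthetic tweet tuples):
    >>> tw = {'A': [('A', 'hi', 5, 's', 3, 2)], 'B': [('B', 'yo', 5, 's', 1, 1)]}
    >>> most_popular(tw, 1, 10)
    'A'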
'''
    # Tally favorites + retweets per candidate over tweets posted in
    # [start, end], then return the candidate with the largest tally,
    # or 'Tie' if the largest tally is shared.
    scores = {}
    for key in tweets:
        total = 0
        for tup in tweets[key]:
            if start <= tup[2] <= end:
                total += tup[4] + tup[5]
        scores[key] = total
    top = max(scores.values())
    winner = list(scores.keys())[list(scores.values()).index(top)]
    if helper_3(scores, top) > 1:
        return 'Tie'
    return winner
def helper_3(collection, value):
''' (data of type dict or list, object of arbitrary type) -> int
Returns the number of instances the entry value (of arbitrary type)
occurs in collection, which is either a dict or a list.
>>> helper_3(['a', 'b', 'a'], 'a')
2
    >>> helper_3({1: 3, 2: 3, 4: 1}, 3)
2
'''
count = 0
if type(collection) == dict:
for key in collection:
if collection[key] == value:
count += 1
elif type(collection) == list:
for item in collection:
if item == value:
count += 1
return count
def detect_author(tweets, tweet):
''' (dict of {str: list of tweet tuples}, str) -> str
Returns the probable author of a tweet based on the tweets passed in
as an argument (which is the output of read_tweets of some tweet data).
Probable author is found by retrieving the hashtags found in the tweet
and comparing that to each candidate's lists of all hashtags they've used
and all unique hashtags they've used. If the hashtag is not unique, then
the candidate who used that hashtag the most will be returned as the
probable author.
'''
    # Approach: extract the hashtags from the tweet. For each candidate,
    # build the pair (all hashtags they used, hashtags unique to them).
    # If any tweet hashtag is unique to one candidate, return that candidate
    # immediately. Otherwise count, per candidate, how often each tweet
    # hashtag appears in their full hashtag list, and return the candidate
    # with the highest total count. If the tweet has no hashtags at all,
    # return 'Unknown'.
tweet_hashtags = extract_hashtags(tweet)
candidate_hashtags = {}
counts = {}
all_hashtags = helper_5(tweets)
unique_hashtags = helper_4(all_hashtags)
if tweet_hashtags != []:
for key, value in tweets.items():
candidate_hashtags[key] = (all_hashtags[key], unique_hashtags[key])
counts[key] = 0
        for hashtag in tweet_hashtags:
            for key in candidate_hashtags:
                if hashtag in candidate_hashtags[key][1]:
                    return key
                counts[key] += helper_3(candidate_hashtags[key][0], hashtag)
max_value = counts[max(counts, key=lambda i: counts[i])]
author = list(counts.keys())[list(counts.values()).index(max_value)]
return author
else:
return 'Unknown'
def helper_4(candidate_hashtags):
'''
Pass a dictionary of candidates as keys and their respective hashtags as
their associated values. Returns a dictionary of candidates as keys and
the candidates' unique hashtags as their respective values.
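    Illustrative doctest (synthetic hashtag lists):
    >>> helper_4({'a': ['#x', '#y'], 'b': ['#y']}) == {'a': ['#x'], 'b': []}
    True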
'''
    unique_dict = {}
    for key in candidate_hashtags:
        unique_dict[key] = []
        for hashtag in candidate_hashtags[key]:
            if all(hashtag not in candidate_hashtags[k]
                   for k in candidate_hashtags if k != key):
                unique_dict[key].append(hashtag)
    return unique_dict
def helper_5(tweets):
'''
Pass a dictionary obtained from read_tweets. Returns a dictionary
of candidates as keys and all the hashtags used by the candidate as their
respective values.
'''
tweets_dict = {}
for key, value in tweets.items():
tweets_dict[key] = []
tuples = tweets[key]
for i in range(len(tuples)):
tweets_dict[key] += extract_hashtags(tuples[i][1])
return tweets_dict
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
#!/usr/bin/env python3
# tabletomallet_firstfic.py
# This script selects & prepares data for use by MALLET. It starts from the
# tabular data produced by jsontotable5.py, and slightly changes format.
# More importantly, it selects volumes distributed as evenly as possible across
# time, and ensures that the volumes for all our preregistered hypotheses
# will be present.
import csv, random, sys
import pandas as pd
# Our general strategy is to take an evenly sized sample (up to 500 books) from
# each decade between the 1780s and the 2000s (where that many are available,
# which they aren't until the mid nineteenth century).
# So we start by organizing books by decade:
decades = dict()
for floor in range(1780, 2010, 10):
decades[floor] = set()
with open('../../metadata/filtered_fiction_plus_18c.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
# there are two different forms of id volumes can have,
# because 19c stories are often multi-volume
# and so I assigned them new docids
inferreddate = int(row['inferreddate'])
decfloor = 10 * (inferreddate // 10)
docid = row['docid']
if decfloor in decades:
decades[decfloor].add(docid)
# Then sample up to 500 books from each decade.
randomsample = set()
for floor, available in decades.items():
if len(available) < 500:
k = len(available)
print(floor, k)
else:
k = 500
selected = random.sample(available, k)
randomsample = randomsample.union(selected)
# We also want to ensure that we have all the books needed to test preregistered hypotheses.
# So let's add those too.
def getdoc(anid):
'''
Gets the docid part of a character id
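    Illustrative (hypothetical ids):
    >>> getdoc('vol123|heroine')
    'vol123'
    >>> getdoc('vol123_04')
    'vol123'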
'''
if '|' in anid:
thedoc = anid.split('|')[0]
elif '_' in anid:
thedoc = anid.split('_')[0]
else:
print('error', anid)
thedoc = anid
return thedoc
specialids = set()
with open('../../evaluation/hypotheses.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
ids = [row['firstsim'], row['secondsim'], row['distractor']]
for anid in ids:
docid = getdoc(anid)
specialids.add(docid)
# In addition, there are a few characters who are known to be split across a couple
# different names. We're going to unify these in data prep.
# Our strategy is to create a dictionary that translates the supplemental character
# ids into main character ids. This is only necessary in 15 cases.
char_translator = dict()
to_supplement = set()
with open('../../evaluation/newcharacters.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
supp = row['supplementalcharid']
if len(supp) > 1:
char_translator[supp] = row['charid']
to_supplement.add(row['charid'])
print('Translator for ', len(char_translator))
sources = ['/Users/tunder/data/character_table_18c19c.tsv',
'/Users/tunder/data/character_table_post1900.tsv']
malletout = '/Users/tunder/data/bioficchars.txt'
errors = 0
errorset = set()
lines = []
special_lines = []
wordholding = dict()
labelholding = dict()
for s in sources:
with open(s, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
docid = fields[0]
if docid in randomsample or docid in specialids:
charid = fields[2]
date = fields[4]
gender = fields[3]
words = fields[5]
label = 'fic' + date + gender
if charid in char_translator or charid in to_supplement:
words = fields[5].split()
if charid in char_translator:
hold_id = char_translator[charid]
else:
hold_id = charid
if hold_id not in wordholding:
wordholding[hold_id] = []
wordholding[hold_id].extend(words)
labelholding[hold_id] = label
else:
outline = ' '.join([charid, label, words]) + '\n'
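                    # One MALLET instance per line: "<id> <label> <text...>",
                    # e.g. (hypothetical) "vol123|heroine fic1850f went home ..."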
if docid in specialids:
special_lines.append(outline)
else:
lines.append(outline)
if len(lines) > 1000:
with open(malletout, mode = 'a', encoding = 'utf-8') as f:
for l in lines:
f.write(l)
lines = []
with open(malletout, mode = 'a', encoding = 'utf-8') as f:
for l in lines:
f.write(l)
for anid in to_supplement:
outline = ' '.join([anid, labelholding[anid], ' '.join(wordholding[anid])]) + '\n'
special_lines.append(outline)
with open(malletout, mode = 'a', encoding = 'utf-8') as f:
for l in special_lines:
f.write(l)
print("Total volumes: ", len(randomsample) + len(specialids))
print()
print('Starting to get biographies.')
biosources = ['../../data/all_post23bio_Sep11.tsv',
'../../data/all_pre23bio_new.tsv']
biometa = pd.read_csv('../../metadata/allparsedbio.tsv', sep = '\t', index_col = 'docid')
def getdecade(date):
return 10 * (date // 10)
biometa = biometa.assign(decade = biometa.inferreddate.map(getdecade))
decadegrouping = biometa.groupby('decade')
biosample = set()
for dec, group in decadegrouping:
if dec < 1780 or dec > 2000:
continue
available = group.index.tolist()
if len(available) < 500:
k = len(available)
        print(dec, k)
else:
k = 500
selected = random.sample(available, k)
biosample = biosample.union(selected)
lines = []
for s in biosources:
with open(s, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
docid = fields[0]
if docid in biosample:
charid = fields[2]
date = fields[5]
gender = fields[3]
words = fields[6]
label = 'bio' + date + gender
outline = ' '.join([charid, label, words]) + '\n'
lines.append(outline)
if len(lines) > 1000:
with open(malletout, mode = 'a', encoding = 'utf-8') as f:
for l in lines:
f.write(l)
lines = []
with open(malletout, mode = 'a', encoding = 'utf-8') as f:
for l in lines:
f.write(l)
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True,
ignore_incomplete=True,
offset_probability=0.5,
ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 8
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
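        # Note: a 'valid' convolution with filter_length 4 shortens the time
        # axis by filter_length - 1 = 3, hence the (seq_length - 3) factors
        # in the dense and reshape layers below.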
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': seq_length,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def main():
APPLIANCES = [
('a', 'fridge freezer', 800),
('b', 'coffee maker', 512),
('c', 'dish washer', 2000),
('d', 'hair dryer', 256),
('e', 'kettle', 256),
('f', 'oven', 2000),
('g', 'toaster', 256),
('h', 'light', 2000),
('i', 'washer dryer', 2000)
]
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", '{}', {})".format(appliance, seq_length)
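        # func_call should now read something like
        # "exp_a('e447a', 'fridge freezer', 800)" (the exact form depends on
        # the call string that init_experiment returns).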
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=20000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e466.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self,
source_vocab_size,
target_vocab_size,
buckets,
size,
num_layers,
max_gradient_norm,
batch_size,
learning_rate,
learning_rate_decay_factor,
use_lstm=False,
num_samples=512,
forward_only=False,
dtype=tf.float32):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
dtype: the data type to use to store internal variables.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(
float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
w_t = tf.get_variable("proj_w", [self.target_vocab_size, size], dtype=dtype)
w = tf.transpose(w_t)
b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
output_projection = (w, b)
def sampled_loss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
# We need to compute the sampled_softmax_loss using 32bit floats to
# avoid numerical instabilities.
local_w_t = tf.cast(w_t, tf.float32)
local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(inputs, tf.float32)
return tf.cast(
tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
num_samples, self.target_vocab_size),
dtype)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode,
dtype=dtype)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(dtype, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.global_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
      # Decoder inputs get an extra "GO" symbol, and are then padded.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
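# A minimal construction-and-training-step sketch (illustrative only; the
# vocabulary sizes, buckets, and hyperparameters below are assumptions, and
# `train_set` stands for data already bucketed by data_utils):
#
#   model = Seq2SeqModel(
#       source_vocab_size=40000, target_vocab_size=40000,
#       buckets=[(5, 10), (10, 15), (20, 25), (40, 50)],
#       size=1024, num_layers=3, max_gradient_norm=5.0, batch_size=64,
#       learning_rate=0.5, learning_rate_decay_factor=0.99)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       encoder_in, decoder_in, weights = model.get_batch(train_set, 0)
#       _, loss, _ = model.step(sess, encoder_in, decoder_in, weights,
#                               bucket_id=0, forward_only=False)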
|
|
"""Server trace events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import enum # pylint: disable=wrong-import-order
import logging
from .. import _events
_LOGGER = logging.getLogger(__name__)
class ServerTraceEvent(_events.TraceEvent):
"""Parent class of all server trace events.
Contains the basic attributes of all events as well as the factory method
    `from_data` that instantiates an event object from its data representation.
All server event classes must derive from this class.
"""
__slots__ = (
'event_type',
'timestamp',
'source',
'servername',
'payload',
)
def __init__(self,
timestamp=None, source=None, servername=None, payload=None):
self.event_type = ServerTraceEventTypes(self.__class__).name
if timestamp is None:
self.timestamp = None
else:
self.timestamp = float(timestamp)
self.source = source
self.payload = payload
self.servername = servername
@property
@abc.abstractmethod
def event_data(self):
"""Return an event's event_data.
"""
@classmethod
def _class_from_type(cls, event_type):
"""Return the class for a given event_type.
"""
etype = getattr(ServerTraceEventTypes, event_type, None)
if etype is None:
_LOGGER.warning('Unknown event type %r', event_type)
return None
eclass = etype.value
return eclass
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
"""Intantiate an event from given event data.
"""
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass.from_data(
timestamp=timestamp,
source=source,
servername=servername,
event_type=event_type,
event_data=event_data,
payload=payload
)
except Exception: # pylint: disable=broad-except
_LOGGER.warning('Failed to parse event type %r:', event_type,
exc_info=True)
event = None
return event
def to_data(self):
"""Return a 6 tuple represtation of an event.
"""
event_data = self.event_data
if event_data is None:
event_data = ''
return (
self.timestamp,
self.source,
self.servername,
self.event_type,
event_data,
self.payload
)
@classmethod
def from_dict(cls, event_data):
"""Instantiate an event from a dict of its data.
"""
event_type = event_data.pop('event_type')
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass(**event_data)
except Exception: # pylint: disable=broad-except
            _LOGGER.warning('Failed to instantiate event type %r:', event_type,
exc_info=True)
event = None
return event
def to_dict(self):
"""Return a dictionary representation of an event.
"""
        return {
            k: getattr(self, k)
            for k in ServerTraceEvent.__slots__ + self.__slots__
        }
class ServerStateTraceEvent(ServerTraceEvent):
"""Event emitted when server state changes.
"""
__slots__ = (
'state',
)
def __init__(self, state,
timestamp=None, source=None, servername=None, payload=None):
super(ServerStateTraceEvent, self).__init__(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
self.state = state
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload,
state=event_data
)
@property
def event_data(self):
return self.state
class ServerBlackoutTraceEvent(ServerTraceEvent):
"""Event emitted when server is blackedout.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerBlackoutClearedTraceEvent(ServerTraceEvent):
"""Event emitted when server blackout is cleared.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerTraceEventTypes(enum.Enum):
"""Enumeration of all server event type names.
"""
server_state = ServerStateTraceEvent
server_blackout = ServerBlackoutTraceEvent
server_blackout_cleared = ServerBlackoutClearedTraceEvent
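# Round-trip sketch (the values are illustrative):
#
#   event = ServerStateTraceEvent(timestamp=100.0, source='tests',
#                                 servername='nodeX.xx.com', state='up')
#   event.to_data()   # (100.0, 'tests', 'nodeX.xx.com', 'server_state',
#                     #  'up', None)
#   ServerTraceEvent.from_data(*event.to_data())  # equivalent event object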
class ServerTraceEventHandler(_events.TraceEventHandler):
"""Base class for processing server trace events.
"""
DISPATCH = {
ServerStateTraceEvent:
lambda self, event: self.on_server_state(
when=event.timestamp,
servername=event.servername,
state=event.state
),
ServerBlackoutTraceEvent:
lambda self, event: self.on_server_blackout(
when=event.timestamp,
servername=event.servername
),
ServerBlackoutClearedTraceEvent:
lambda self, event: self.on_server_blackout_cleared(
when=event.timestamp,
servername=event.servername
),
}
def dispatch(self, event):
"""Dispatch event to one of the handler methods.
"""
return self.DISPATCH.get(type(event), None)
@abc.abstractmethod
def on_server_state(self, when, servername, state):
"""Invoked when server state changes.
"""
@abc.abstractmethod
def on_server_blackout(self, when, servername):
"""Invoked when server is blackedout.
"""
@abc.abstractmethod
def on_server_blackout_cleared(self, when, servername):
"""Invoked when server blackout is cleared.
"""
|
|
#!/usr/bin/python
#
# File: FlightProfile.py
# Author: Ellery Chan
# Email: ellery@precisionlightworks.com
# Date: 7 Dec 2015
"""
Display the flight profile of a team's docking attempt.
"""
from __future__ import print_function, division
#from collections import namedtuple
import sys
import os.path
import math
import itertools
import pygame.sprite
import pygame.image
import pygame.font
import pygame.time
import Queue
import qrcode
from station.state import State # TODO: get rid of this dependency!!
from DockSim import DockSim, FlightParams
#----------------------------------------------------------------------------
class Colors(object):
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
CYAN = ( 0, 255, 255)
MAGENTA = (255, 0, 255)
ORANGE = (255, 128, 0)
LIGHT_ORANGE = (255, 180, 52)
#----------------------------------------------------------------------------
class Text(pygame.sprite.DirtySprite):
""" A displayable text object """
LEFT = 0x01
CENTER = 0x02
RIGHT = 0x04
BOTTOM = 0x10
MIDDLE = 0x20
TOP = 0x40
DEFAULT_FONT = 'freesansbold.ttf'
DEFAULT_FONT_SIZE = 100
def __init__(self, pt, value="", size=DEFAULT_FONT_SIZE, font=DEFAULT_FONT,
justify=LEFT|BOTTOM, color=Colors.WHITE, intervalsMs=(1000,0), shrinkToWidth=0):
""" intervalMs is a list of (on, off, on, off, ...) time intervals used to toggle the text visibility """
super(Text, self).__init__()
self.pos = pt
self.pointSize = size
self.fontName = font
self.font = pygame.font.Font(font, self.pointSize)
self.justify = justify
self.color = color
self.image = None # required by sprite.draw()
self.intervalsMs = itertools.cycle(intervalsMs) # create a cycle iterator
self.toggleTimeMs = 0
self.rect = None # required by sprite.draw()
self._value = None
self.shrinkToWidth = shrinkToWidth
self.setValue(value) # sets _value and image
self.visible=0
self.update()
def lineHeight(self):
""" Return the height in pixels of the font text """
return self.font.get_linesize()
def value(self):
""" Return the text string """
return self._value
def setValue(self, value, color=None):
""" Set the text string value, and optionally set the color """
self._value = value
if color:
self.color = color
# Create the text image
# If shrinkToWidth > 0, shrink the pointSize until it fits
while True:
textWidth = self.font.size(self._value)[0]
if self.shrinkToWidth == 0 or textWidth <= self.shrinkToWidth:
break
self.pointSize = int(self.pointSize * self.shrinkToWidth/textWidth)
self.font = pygame.font.Font(self.fontName, self.pointSize)
self.image = self.font.render(self._value, True, self.color)
#print("Text.value: '{}'".format(self._value))
self.dirty = 1
self.rect = self.image.get_rect()
if self.justify & self.LEFT:
self.rect.left = self.pos[0]
elif self.justify & self.CENTER:
self.rect.centerx = self.pos[0]
else: # self.RIGHT
self.rect.right = self.pos[0]
if self.justify & self.BOTTOM:
self.rect.bottom = self.pos[1]
elif self.justify & self.MIDDLE:
self.rect.centery = self.pos[1]
else: # self.TOP
self.rect.top = self.pos[1]
def update(self):
""" Determine the visibility, then draw """
t = pygame.time.get_ticks()
while t >= self.toggleTimeMs:
self.visible = (self.visible + 1) % 2 # toggle between 0 and 1
self.dirty = self.visible + 1 # set to 2 (always dirty) when visible
self.toggleTimeMs = t + next(self.intervalsMs)
#----------------------------------------------------------------------------
class Timer(object):
""" A timer object. Computes elapsed time.
Optionally generates a USEREVENT every tick.
        The tick interval is set via the periodMs constructor argument.
"""
def __init__(self, periodMs=44, generateEvents=False):
self.startTimeMs = 0
self.stopTimeMs = 0
self.periodMs = periodMs # time between timer events
self.generateEvents = generateEvents
def isRunning(self):
return self.stopTimeMs < self.startTimeMs
def currentTime(self):
""" Return the actual simulation time if the clock is running.
If the clock has been stopped (by calling stop()), return
the time when stop was called.
"""
if self.isRunning():
return pygame.time.get_ticks()
else:
return self.stopTimeMs
def start(self):
""" Start the timer, and generate USEREVENTs """
self.stopTimeMs = 0
self.startTimeMs = pygame.time.get_ticks()
# Generate a USEREVENT every periodMs milliseconds
if self.generateEvents:
pygame.time.set_timer(pygame.USEREVENT, self.periodMs)
def stop(self):
""" Stop the timer, and stop generating USEREVENTs """
if self.isRunning():
self.stopTimeMs = pygame.time.get_ticks()
if self.generateEvents:
pygame.time.set_timer(pygame.USEREVENT, 0)
def elapsedMs(self):
""" Return the elapsed time in milliseconds """
return self.currentTime() - self.startTimeMs
def elapsedSec(self):
""" Return the elapsed time in seconds """
return self.elapsedMs()/1000.0
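# Typical use (sketch): create once, start at launch, poll each frame.
#
#   timer = Timer(periodMs=44, generateEvents=True)
#   timer.start()              # also begins posting pygame.USEREVENTs
#   ...                        # inside the event loop
#   t = timer.elapsedSec()
#   timer.stop()               # freezes currentTime() at the stop instant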
#----------------------------------------------------------------------------
class Clock(Text):
""" An Text object that knows how to format a time value """
def __init__(self, pt, value=0, size=100, justify=Text.BOTTOM|Text.LEFT, **kwargs):
super(Clock, self).__init__(pt, value=value, size=size, justify=justify, **kwargs)
def setValue(self, value=None):
""" Set the value of the clock in seconds
"""
tSeconds = float(value)
super(Clock, self).setValue("{:02d}:{:02d}:{:02d}.{:02d}".format(int(tSeconds//3600), int((tSeconds%3600)//60), int(tSeconds%60), int((tSeconds%1) * 100)))
#----------------------------------------------------------------------------
class ImgObj(pygame.sprite.DirtySprite):
def __init__(self, path=None, image=None, canvas=None, alpha=False, pivot=(0,0)):
""" Create a sprite given an image file, or a pygame image object """
# Call the parent class (Sprite) constructor
super(ImgObj, self).__init__()
# Load an image, creating a Surface.
if path:
self.image = pygame.image.load(path)
if image:
self.image = image
if alpha:
self.image = self.image.convert_alpha()
else:
self.image = self.image.convert()
self.pivot = pivot
self.children = [] # Tethered children
# Fetch the rectangle object that has the dimensions of the image
# Update the position of this object by setting the values of rect.x and rect.y
self.rect = self.image.get_rect()
self.eraseRect = self.rect.copy()
def x(self): return self.rect.x
def y(self): return self.rect.y
def w(self): return self.image.get_width()
def h(self): return self.image.get_height()
def pos(self): return (self.x(), self.y())
def pivotX(self): return self.rect.x + self.pivot[0]
def pivotY(self): return self.rect.y + self.pivot[1]
def pivotPos(self): return (self.pivotX(), self.pivotY())
def lerp(self, st, en, p):
return float(en - st) * p + float(st)
def moveTo(self, p):
""" Move to point (x,y) """
self.rect.x = p[0] - self.pivot[0]
self.rect.y = p[1] - self.pivot[1]
if self.children:
self.moveChildren()
def moveChildren(self):
for c in self.children:
c.moveTo((0, 0))
def moveBetween(self, p1, p2, frac):
""" Linearly interpolate between p1 and p2.
Move to a point linearly interpolated between point p1 and point p2.
frac is the interpolation amount.
"""
self.moveTo((self.lerp(p1[0], p2[0], frac), self.lerp(p1[1], p2[1], frac)))
def addChild(self, tetheredChild):
self.children.append(tetheredChild)
#----------------------------------------------------------------------------
class TetheredImgObj(ImgObj):
""" An ImgObj that moves relative to a parent ImgObj. """
def __init__(self, path, canvas=None, alpha=False, pivot=(0,0), parent=None, offset=(0,0)):
super(TetheredImgObj, self).__init__(path, canvas=canvas, alpha=alpha, pivot=pivot)
self.parent = parent
self.parent.addChild(self)
self.offset = offset
def moveTo(self, dxy):
""" Move to offset relative to parent specified by delta """
super(TetheredImgObj, self).moveTo((self.parent.pivotX() + self.offset[0], self.parent.pivotY() + self.offset[1]))
#----------------------------------------------------------------------------
class QrImgObj(ImgObj):
""" An ImgObj that displays a QR code """
QR_BOX_SIZE_PX = 5
QR_BORDER_PX = 2
def __init__(self, qrText, canvas=None, alpha=False, pivot=(0,0)):
""" Create a QR code image containing the string qrText """
super(QrImgObj, self).__init__(image=self.makeQrCode(qrText),
canvas=canvas,
alpha=alpha,
pivot=pivot)
@staticmethod
def makeQrCode(qrText):
""" Generate a QR code image, and return it as a pygame image object """
qr = qrcode.QRCode(
version=None,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=QrImgObj.QR_BOX_SIZE_PX,
border=QrImgObj.QR_BORDER_PX,
)
qr.add_data(qrText)
qr.make(fit=True)
# Return a PIL image object
img = qr.make_image().convert("RGB")
# img = qrcode.make(qrText).convert("RGB")
return pygame.image.fromstring(img.tostring(), img.size, img.mode)
#----------------------------------------------------------------------------
class AnimGroup(pygame.sprite.LayeredDirty):
""" A Group that can sequence through multi-image sprites.
This creates an effect like an animated GIF.
The group can handle multiple sequences.
"""
def __init__(self):
self.sequences = [] # list of iterators; each iterator sequences through a list of sprites
super(AnimGroup, self).__init__()
def add(self, seq=None):
""" Add an image sequence to the group.
seq is a list or tuple of Sprites.
"""
if seq:
# Make all images invisible to start
for s in seq:
s.visible = 0
# Add an iterator that cycles through the sequence
self.sequences.append(itertools.cycle(seq))
super(AnimGroup, self).add(*seq)
def draw(self, surface):
""" Activate the next image in each sequence and draw. """
visibleSprites = []
# Make the next sprite in each sequence visible
for s in self.sequences:
sp = next(s)
sp.visible = 1
sp.dirty = 1
visibleSprites.append(sp)
# Draw the visible sprites
rects = super(AnimGroup, self).draw(surface)
# Make them invisible again
for sp in visibleSprites:
sp.visible = 0
return rects
def empty(self):
""" Clear the list of sprite iterators """
super(AnimGroup, self).empty()
self.sequences = []
#----------------------------------------------------------------------------
class FlightProfileApp(object):
""" The app reads and displays flight profile information from the Master Server.
The app uses pygame to display sprite-based graphics.
"""
WINDOW_TITLE = "Flight Profile"
FULLSCREEN = True
MAX_SIM_DURATION_S = 45.0 # longer sim will be compressed to 45 sec.
BG_LAYER = 0
LABEL_LAYER = 1
TEXT_LAYER = 2
SHIP_LAYER = 3
SCREEN_SIZE = (1920, 1080)
SCREEN_CENTER = (SCREEN_SIZE[0]//2, SCREEN_SIZE[1]//2)
FLIGHT_PATH_START = (400, 600)
FLIGHT_PATH_END = (1600, 750)
STARS_BG_IMG = "img/Star-field_2_cropped.jpg"
STARS_BG_POS = (0, 0)
EARTH_BG_IMG = "img/earth_cropped.png"
EARTH_BG_POS = (0, 800)
QR_POS = (50, 1050) # loc of bottom left corner
# QR_POS = (500, 500)
QR_LABEL_OFFSET = (10, -120) # offset from bottom right corner
QR_LABEL_SIZE = 30
STATION_IMG = "img/station_2.png"
STATION_PIVOT = (16, 225) # docking port
STATION_POS = FLIGHT_PATH_END
CAPSULE_IMG = "img/capsule_2.png"
CAPSULE_PIVOT = (234, 98) # nose
CAPSULE_POS = FLIGHT_PATH_START
FLAME_IMG = ("img/flame-1.png", "img/flame-2.png", "img/flame-3.png", "img/flame-4.png")
FLAME_PIVOT = (198, 98) # base of flame
FLAME_OFFSET = (-200, 0)
FLAME_UP_IMG = ("img/small_flame_up-1.png", "img/small_flame_up-2.png", "img/small_flame_up-3.png", "img/small_flame_up-4.png")
FLAME_UP_PIVOT = (11, 86)
FLAME_UP_OFFSET = (-24, -21)
FLAME_DOWN_IMG = ("img/small_flame_down-1.png", "img/small_flame_down-2.png", "img/small_flame_down-3.png", "img/small_flame_down-4.png")
FLAME_DOWN_PIVOT = (11, 16)
FLAME_DOWN_OFFSET = (-24, 21)
OUTCOMES = {
DockSim.OUTCOME_DNF : "Destination not reached due to loss of all forward velocity",
DockSim.OUTCOME_NO_FUEL : "Ran out of fuel before achieving proper docking velocity",
DockSim.OUTCOME_TOO_SLOW: "Latch failure due to insufficient forward velocity",
DockSim.OUTCOME_TOO_FAST: "Latch failure caused by excessive forward velocity",
DockSim.OUTCOME_SUCCESS : "Docked successfully. Nice job!",
}
READY_CMD = "READY"
WELCOME_CMD = "WELCOME"
RUN_CMD = "RUN"
QUIT_CMD = "QUIT"
KIOSK_CMD = "KIOSK"
def __init__(self):
self.canvas = None
self.timer = None
self.capsule = None
self.station = None
self.missionTimeLabel = None
self.simulatedLabel = None
self.actualLabel = None
self.simulatedTime = None
self.actualTime = None
self.paramsLabel = None
self.distLabel1 = None
self.distLabel2 = None
self.dist = None
self.frameRate = 30 # fps
self.frameClock = None
self.fullscreen = self.FULLSCREEN
self.arriveUrl = ""
self.dockUrl = ""
self.latchUrl = ""
self.maxVelocity = 0.0
self.simPhase = DockSim.START_PHASE
self.outOfFuel = False
self.staticGroup = pygame.sprite.LayeredUpdates()
self.statsGroup = pygame.sprite.LayeredDirty()
self.blinkingTextGroup = pygame.sprite.LayeredDirty()
self.movingGroup = pygame.sprite.OrderedUpdates()
self.animGroup = AnimGroup()
self.workQueue = None # work queue for multiprocess mode
self.stationCallbackObj = None
def initPygame(self):
""" Initialize the pygame modules that we need """
# pygame.init() # initialize all modules
pygame.display.init()
pygame.font.init()
pygame.mouse.set_visible(False)
self.frameClock = pygame.time.Clock()
self.timer = Timer()
self.timer.start()
def loadImageObjects(self):
""" Load sprite images from files, and dynamically create QR code images """
scriptDir = os.path.dirname(__file__)
self.stars = ImgObj(os.path.join(scriptDir, self.STARS_BG_IMG), alpha=False)
self.stars.moveTo(self.STARS_BG_POS)
self.earth = ImgObj(os.path.join(scriptDir, self.EARTH_BG_IMG), alpha=True)
self.earth.moveTo(self.EARTH_BG_POS)
# Load animated flames
self.capsule = ImgObj(os.path.join(scriptDir, self.CAPSULE_IMG), alpha=True, pivot=self.CAPSULE_PIVOT)
self.capsule.moveTo(self.CAPSULE_POS)
self.rearFlame = [TetheredImgObj(os.path.join(scriptDir, f), alpha=True, pivot=self.FLAME_PIVOT, parent=self.capsule, offset=self.FLAME_OFFSET) for f in self.FLAME_IMG]
self.frontFlameUp = [TetheredImgObj(os.path.join(scriptDir, f), alpha=True, pivot=self.FLAME_UP_PIVOT, parent=self.capsule, offset=self.FLAME_UP_OFFSET) for f in self.FLAME_UP_IMG]
self.frontFlameDown = [TetheredImgObj(os.path.join(scriptDir, f), alpha=True, pivot=self.FLAME_DOWN_PIVOT, parent=self.capsule, offset=self.FLAME_DOWN_OFFSET) for f in self.FLAME_DOWN_IMG]
self.station = ImgObj(os.path.join(scriptDir, self.STATION_IMG), alpha=True, pivot=self.STATION_PIVOT)
self.station.moveTo(self.STATION_POS)
# Generate the Arrive, Dock, and Latch QR codes to display in the corner of the screen
self.createQrCodes()
def setQrUrls(self, arriveUrl, dockUrl, latchUrl):
""" Set the URLs that will be turned into QR codes and displayed """
self.arriveUrl = arriveUrl
self.dockUrl = dockUrl
self.latchUrl = latchUrl
def createQrCodes(self):
""" Dynamically create some QR codes.
They will contain the address of this station, so they have to
be generated at runtime.
"""
self.arriveQr = QrImgObj(self.arriveUrl)
self.arriveQr.moveTo((self.QR_POS[0], self.QR_POS[1] - self.arriveQr.h()))
self.dockQr = QrImgObj(self.dockUrl)
self.dockQr.moveTo((self.QR_POS[0], self.QR_POS[1] - self.dockQr.h()))
self.latchQr = QrImgObj(self.latchUrl)
self.latchQr.moveTo((self.QR_POS[0], self.QR_POS[1] - self.latchQr.h()))
def displayQrCode(self, qrCodeImg, label=None):
""" Display a QrImgObj with a text label """
self.staticGroup.add(qrCodeImg)
if label:
# text = Text((self.QR_POS[0] + qrCodeImg.w(), self.QR_POS[1]), value=label, size=self.QR_LABEL_SIZE, color=Colors.WHITE)
pos = (self.QR_POS[0] + qrCodeImg.w() + self.QR_LABEL_OFFSET[0],
self.QR_POS[1] + self.QR_LABEL_OFFSET[1])
text = Text(pos, value=label, size=self.QR_LABEL_SIZE, color=Colors.WHITE)
self.staticGroup.add(text)
def setupBackgroundDisplay(self):
self.staticGroup.empty()
self.staticGroup.add((self.stars, self.earth), layer=self.BG_LAYER)
# self.staticGroup.add(self.arriveQr, layer=self.BG_LAYER)
def setupMissionTimeDisplay(self):
X = 100
Y = 100
LINE_SPACE = 70
BIG_TEXT = 80
SMALL_TEXT = 60
TINY_TEXT = 35
TAB1 = 300
TAB2 = TAB1 + 40
self.missionTimeLabel = Text((X, Y), value="Mission Time", size=BIG_TEXT, color=Colors.ORANGE)
self.simulatedLabel = Text((X+TAB1, Y+LINE_SPACE), value="Simulated:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.actualLabel = Text((X+TAB1, Y+LINE_SPACE*2), value="Actual:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.compressionLabel = Text((X+TAB1, int(Y+LINE_SPACE*2.7)), value="Time Compression:", color=Colors.ORANGE, size=TINY_TEXT, justify=Text.RIGHT|Text.BOTTOM)
self.staticGroup.add((self.missionTimeLabel, self.simulatedLabel, self.actualLabel, self.compressionLabel), layer=self.LABEL_LAYER)
self.userLabel = Text((X, int(Y+LINE_SPACE*4.5)), value="Flight Parameters", size=BIG_TEXT, color=Colors.ORANGE)
self.tAftLabel = Text((X+50, int(Y+LINE_SPACE*5.2)), value="tAft:", size=TINY_TEXT, color=Colors.ORANGE, justify=Text.LEFT|Text.BOTTOM)
self.tAftVal = Text((self.tAftLabel.rect.right+15, int(Y+LINE_SPACE*5.2)), value="{:05.1f}".format(self.profile.tAft), size=TINY_TEXT, color=Colors.CYAN, justify=Text.LEFT|Text.BOTTOM)
self.tCoastLabel = Text((self.tAftVal.rect.right+50, int(Y+LINE_SPACE*5.2)), value="tCoast:", size=TINY_TEXT, color=Colors.ORANGE, justify=Text.LEFT|Text.BOTTOM)
self.tCoastVal = Text((self.tCoastLabel.rect.right+15, int(Y+LINE_SPACE*5.2)), value="{:05.1f}".format(self.profile.tCoast), size=TINY_TEXT, color=Colors.CYAN, justify=Text.LEFT|Text.BOTTOM)
self.tForeLabel = Text((self.tCoastVal.rect.right+50, int(Y+LINE_SPACE*5.2)), value="tFore:", size=TINY_TEXT, color=Colors.ORANGE, justify=Text.LEFT|Text.BOTTOM)
self.tForeVal = Text((self.tForeLabel.rect.right+15, int(Y+LINE_SPACE*5.2)), value="{:05.1f}".format(self.profile.tFore), size=TINY_TEXT, color=Colors.CYAN, justify=Text.LEFT|Text.BOTTOM)
self.staticGroup.add((self.userLabel, self.tAftLabel, self.tCoastLabel, self.tForeLabel,), layer=self.LABEL_LAYER)
self.staticGroup.add((self.tAftVal, self.tCoastVal, self.tForeVal,), layer=self.LABEL_LAYER)
self.simulatedTime = Clock((X+TAB2, Y+LINE_SPACE), size=SMALL_TEXT)
self.actualTime = Clock((X+TAB2, Y+LINE_SPACE*2), size=SMALL_TEXT)
self.compression = Text((X+TAB2, int(Y+LINE_SPACE*2.7)), value="1 sim = {:0.2f} actual sec".format(self.missionTimeScale), size=TINY_TEXT)
self.statsGroup.add((self.simulatedTime, self.actualTime))
self.staticGroup.add((self.compression), layer=self.LABEL_LAYER)
def setupStatsDisplay(self):
X = 1000
Y = 100
LINE_SPACE = 60
BIG_TEXT = 80
SMALL_TEXT = 50
TAB1 = 450
TAB2 = TAB1 + 40
self.paramsLabel = Text((X, Y), value="Flight Performance", size=BIG_TEXT, color=Colors.ORANGE)
self.distLabel = Text((X+TAB1, Y + LINE_SPACE), value="Closing Distance:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.velLabel = Text((X+TAB1, Y + LINE_SPACE*2), value="Relative Velocity:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.vmaxLabel = Text((X+TAB1, Y + LINE_SPACE*3), value="Max Rel Velocity:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.accelLabel = Text((X+TAB1, Y + LINE_SPACE*4), value="Acceleration:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.fuelLabel = Text((X+TAB1, Y + LINE_SPACE*5), value="Fuel Remaining:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.phaseLabel = Text((X+TAB1, Y + LINE_SPACE*6), value="Phase:", size=SMALL_TEXT, color=Colors.ORANGE, justify=Text.RIGHT|Text.BOTTOM)
self.staticGroup.add((self.paramsLabel, self.distLabel, self.velLabel, self.vmaxLabel, self.accelLabel, self.fuelLabel, self.phaseLabel), layer=self.LABEL_LAYER)
self.distance = Text((X+TAB2, Y + LINE_SPACE), value="0", size=SMALL_TEXT)
self.velocity = Text((X+TAB2, Y + LINE_SPACE*2), value="0", size=SMALL_TEXT)
self.vmax = Text((X+TAB2, Y + LINE_SPACE*3), value="0", size=SMALL_TEXT)
self.acceleration = Text((X+TAB2, Y + LINE_SPACE*4), value="0", size=SMALL_TEXT)
self.fuelRemaining = Text((X+TAB2, Y + LINE_SPACE*5), value="0", size=SMALL_TEXT)
self.phase = Text((X+TAB2, Y + LINE_SPACE*6), value=DockSim.PHASE_STR[DockSim.ACCEL_PHASE], size=SMALL_TEXT)
self.statsGroup.add((self.distance, self.velocity, self.vmax, self.acceleration, self.fuelRemaining, self.phase))
def setupSpaceshipDisplay(self):
#self.movingGroup.add((self.station, self.capsule))
self.staticGroup.add((self.station), layer=self.SHIP_LAYER)
self.movingGroup.add(self.capsule)
def initScreen(self):
""" Initialize the drawing surface """
# Create the window
if self.fullscreen:
self.canvas = pygame.display.set_mode((0,1080), pygame.FULLSCREEN | pygame.DOUBLEBUF | pygame.HWSURFACE)# | pygame.OPENGL)
else:
self.canvas = pygame.display.set_mode((0,1080), pygame.DOUBLEBUF | pygame.HWSURFACE)# | pygame.OPENGL)
# Set the window title (not visible in fullscreen mode)
pygame.display.set_caption(self.WINDOW_TITLE)
def setupDisplay(self):
""" Create the display window and the components within it """
# Set up parts of the display
self.setupBackgroundDisplay()
self.setupMissionTimeDisplay()
self.setupStatsDisplay()
self.setupSpaceshipDisplay()
def setFlightProfile(self, flightParams=None):
# Get flight profile parameters
if flightParams:
self.profile = flightParams
else:
# Set to some default params for testing
self.profile = FlightParams(tAft=8.2,#8.4#8.3
tCoast=1, #0
tFore=13.1,
aAft=0.15,
aFore=0.09,
rFuel=0.7,
qFuel=20,
dist=15.0,
vMin=0.01,
vMax=0.1,
vInit=0.0,
tSim=self.MAX_SIM_DURATION_S,
)
# Make sure the user parameters are ddd.d format
self.profile.tAft = (math.trunc(self.profile.tAft * 10) % 10000)/10.0
self.profile.tCoast = (math.trunc(self.profile.tCoast * 10) % 10000)/10.0
self.profile.tFore = (math.trunc(self.profile.tFore * 10) % 10000)/10.0
# Create a simulation object initialized with the flight profile
self.dockSim = DockSim(self.profile)
self.maxVelocity = 0.0
# Get the total time of flight
self.duration = self.dockSim.flightDuration()
if self.duration is None: # flight did not complete (0 or neg. velocity)
self.duration = DockSim.MAX_FLIGHT_DURATION_S
print("duration:", self.duration)
# Compute time scaling for simulation playback
self.simDuration = min(self.duration, self.profile.tSim)
print("simDuration:", self.simDuration)
self.missionTimeScale = max(1.0, float(self.duration)/self.profile.tSim)
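        # e.g. a 90 s flight with tSim = 45 s plays back at double speed
        # (scale 2.0); flights shorter than tSim play in real time (1.0).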
print("missionTimeScale:", self.missionTimeScale)
print("terminalVelocity:", self.dockSim.terminalVelocity())
print("success:", self.dockSim.dockIsSuccessful())
self.simPhase = DockSim.START_PHASE # set phase to initial simulation phase
self.outOfFuel = False
def createReadyText(self):
GIANT_TEXT = 300
text = Text(self.SCREEN_CENTER,
value="READY",
size=GIANT_TEXT,
color=Colors.ORANGE,
justify=Text.CENTER|Text.MIDDLE,
intervalsMs=(1000,0))
self.blinkingTextGroup.add(text)
def createWelcomeText(self, name):
GIANT_TEXT = 300
text = Text(self.SCREEN_CENTER, #(self.SCREEN_CENTER[0], 300),
value="Welcome",
size=GIANT_TEXT,
color=Colors.ORANGE,
justify=Text.CENTER|Text.BOTTOM)
nameText = Text(self.SCREEN_CENTER,
value=str(name),
size=int(GIANT_TEXT * 0.6),
color=Colors.WHITE,
justify=Text.CENTER|Text.TOP,
shrinkToWidth=int(self.SCREEN_SIZE[0]*0.9))
self.blinkingTextGroup.add((text, nameText))
def createPassFailText(self, passed=True, msg=None):
GIANT_TEXT = 300
text = Text(self.SCREEN_CENTER,
value="SUCCESS!" if passed else "FAIL!",
size=GIANT_TEXT,
color=Colors.GREEN if passed else Colors.RED,
justify=Text.CENTER|Text.MIDDLE,
intervalsMs=(1000,250))
self.blinkingTextGroup.add(text)
if msg:
if len(msg) < 50:
msgText = Text((self.SCREEN_CENTER[0], 750),
value=msg,
size=int(GIANT_TEXT * 0.3),
color=Colors.GREEN if passed else Colors.RED,
justify=Text.CENTER|Text.MIDDLE)
self.blinkingTextGroup.add(msgText)
else: # divide the text into two lines
words = msg.split()
nWords = len(words)
line1 = " ".join(words[:nWords//2])
line2 = " ".join(words[nWords//2:])
msgText1 = Text((self.SCREEN_CENTER[0], 750),
value=line1,
size=int(GIANT_TEXT * 0.3),
color=Colors.GREEN if passed else Colors.RED,
justify=Text.CENTER|Text.MIDDLE)
msgText2 = Text((self.SCREEN_CENTER[0], 850),
value=line2,
size=int(GIANT_TEXT * 0.3),
color=Colors.GREEN if passed else Colors.RED,
justify=Text.CENTER|Text.MIDDLE)
self.blinkingTextGroup.add((msgText1, msgText2))
self.displayQrCode(self.latchQr, label="Scan to Activate Docking Latch")
def createKioskScreen(self, args):
""" Put up some text on the display
Args contains a list of tuples. Each tuple describes a block of
text that may span multiple lines. Each tuple is of the form:
(pointsize, color, position, justification, text). text may contain
newline characters to cause the text to be rendered on multiple
lines. Otherwise, the text will remain on one line, and the
pointsize will be reduced to make the text fit within the line width.
"""
        # NOTE: args arrives as a string; eval() trusts the sender completely.
        # ast.literal_eval would be safer if the tuples contain only literals.
        args = eval(args)
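        # Illustrative args value (hypothetical colors and positions, not from
        # the original source); each tuple is
        # (pointsize, color, position, justification, text):
        #   [(60, (255, 255, 255), (960, 400), Text.CENTER | Text.MIDDLE,
        #     "First line|Second line"),
        #    (40, (255, 165, 0), (960, 800), Text.CENTER | Text.TOP, "Footer")]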
for ptsize,color,pos,justify,text in args:
pos = list(pos)
splitChar = "\n" if "\n" in text else "|"
# Create all the lines, shrinking to fit the screen width (with a little margin)
textSprites = []
for t in text.split(splitChar):
textSprites.append(Text(pos, t, size=ptsize, color=color, justify=justify,
shrinkToWidth=int(self.SCREEN_SIZE[0]*0.9)))
# If the lines don't fit vertically, shrink them more until they do
numLines = len(textSprites)
smallestPtSize = min([t.pointSize for t in textSprites])
smallestLineHeight = min([t.lineHeight() for t in textSprites])
totalHeight = smallestLineHeight * numLines
vertSize = int(self.SCREEN_SIZE[1]*0.9)
if totalHeight > vertSize:
# Assume that everything scales proportionally
smallestPtSize = int(smallestPtSize * vertSize/totalHeight)
smallestLineHeight = int(smallestLineHeight * vertSize/totalHeight)
# # Figure out what size to make the text
# smallestLineHeight = min([t.lineHeight() for t in textSprites])
del textSprites
if justify & Text.BOTTOM:
pos[1] -= smallestLineHeight * (numLines - 1)
elif justify & Text.MIDDLE:
pos[1] -= int(smallestLineHeight * (numLines - 1)/2)
# Now remake all the lines at the smallest point size
for t in text.split(splitChar):
textSprite = Text(pos, t, size=smallestPtSize, color=color, justify=justify)
self.blinkingTextGroup.add(textSprite)
pos[1] += smallestLineHeight
def drawCharts(self):
pass
def draw(self):
# Draw background and labels (should only do when necessary)
rectList = self.staticGroup.draw(self.canvas)
# Draw changing text fields
rectList += self.statsGroup.draw(self.canvas)
# Draw graphical objects (capsule and station)
rectList += self.animGroup.draw(self.canvas)
rectList += self.movingGroup.draw(self.canvas)
rectList += self.blinkingTextGroup.draw(self.canvas)
# Only copy the modified parts of the canvas to the display
pygame.display.update(rectList)
def update(self):
""" Update the simulation """
# Get the current elapsed time
t = self.timer.elapsedSec()
# Compute the simulation state values for the current time
state = self.dockSim.shipState(t * self.missionTimeScale)
# Update stats
if state.phase == DockSim.END_PHASE:
self.timer.stop()
self.simulatedTime.setValue(state.tEnd/self.missionTimeScale)
self.actualTime.setValue(state.tEnd)
self.maxVelocity = max(self.maxVelocity, state.currVelocity)
self.distance.setValue("{:0.2f} m".format(self.profile.dist - state.distTraveled))
if state.currVelocity >= 0.01:
self.velocity.setValue("{:0.2f} m/sec".format(state.currVelocity), color=Colors.GREEN if self.dockSim.safeDockingVelocity(state.currVelocity) else Colors.RED)
else: # show more decimal places
self.velocity.setValue("{:0.6f} m/sec".format(state.currVelocity), color=Colors.GREEN if self.dockSim.safeDockingVelocity(state.currVelocity) else Colors.RED)
self.vmax.setValue("{:0.2f} m/sec".format(self.maxVelocity))
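        # The acceleration readout is picked from a tuple indexed by the
        # current phase constant; thrust drops to zero once the fuel runs out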
self.acceleration.setValue("{:0.2f} m/sec^2".format((0.0,
self.profile.aAft if not self.outOfFuel else 0.0,
0.0,
-self.profile.aFore if not self.outOfFuel else 0.0,
0.0,
0.0)[state.phase]))
self.fuelRemaining.setValue("{:0.2f} kg".format(state.fuelRemaining), color=Colors.GREEN if state.fuelRemaining > 0.0 else Colors.RED)
self.phase.setValue(DockSim.PHASE_STR[state.phase])
# Update graphics
changeDetected = False
# Detect running out of fuel
if not self.outOfFuel and state.fuelRemaining <= 0.0:
self.outOfFuel = True
changeDetected = True
# Detect a phase change
if state.phase != self.simPhase:
self.simPhase = state.phase
changeDetected = True
# If a phase change was detected or the ship ran out of fuel,
# update the ship graphics to reflect the new state
if changeDetected:
self.animGroup.empty()
if state.phase == DockSim.ACCEL_PHASE:
if not self.outOfFuel:
self.animGroup.add(self.rearFlame)
elif state.phase == DockSim.DECEL_PHASE:
if not self.outOfFuel:
self.animGroup.add(self.frontFlameUp)
self.animGroup.add(self.frontFlameDown)
elif state.phase == DockSim.END_PHASE:
passed = self.dockSim.dockIsSuccessful()
# msg = self.outcomeMessage(state)
result = self.dockSim.outcome(state)
self.createPassFailText(passed=passed, msg=self.OUTCOMES[result])
self.reportPassFail(passed, state.tEnd, result)
# Compute the fraction of the total trip distance that has been traversed,
# and place the ship at that location
frac = state.distTraveled/self.profile.dist
self.capsule.moveBetween(self.FLIGHT_PATH_START, self.FLIGHT_PATH_END, frac)
# Update any text objects that might be animated
for sp in self.blinkingTextGroup.sprites():
sp.update()
def userQuit(self):
# Retrieve queued events from mouse, keyboard, timers
highPriorityEvents = pygame.event.get(pygame.QUIT) +\
pygame.event.get(pygame.KEYUP)
pygame.event.get() # flush the rest of the events
# Retrieve an event from the event queue
if highPriorityEvents:
# Process the event
# First check to see whether we should quit
event = highPriorityEvents.pop()
return event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE)
return False
def mainLoop(self):
""" Receive events from the user and/or application objects, and update the display """
highPriorityEvents = []
lastFrameMs = 0 # @UnusedVariable
# Draw the whole background once
# self.draw()
self.staticGroup.draw(self.canvas)
pygame.display.update()
# Start the simulation time clock
self.timer.start()
# Run the simulation
while True: # main game loop
# Retrieve queued events from mouse, keyboard, timers
highPriorityEvents += pygame.event.get(pygame.QUIT) +\
pygame.event.get(pygame.KEYUP)
pygame.event.get() # flush the rest of the events
# Retrieve an event from the event queue
if highPriorityEvents:
# Process the event
# First check to see whether we should quit
event = highPriorityEvents.pop()
if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):
pygame.quit()
return
# Refresh the display
self.update()
self.draw()
lastFrameMs = self.frameClock.tick(self.frameRate) # @UnusedVariable
def processLoop(self):
""" Update the display """
# Refresh the display
self.update()
self.draw()
lastFrameMs = self.frameClock.tick(self.frameRate) # @UnusedVariable
def clearDisplay(self):
""" Clear out all the sprite groups, but leave the background """
self.animGroup.empty()
self.blinkingTextGroup.empty()
self.movingGroup.empty()
self.statsGroup.empty()
self.staticGroup.empty()
# Draw the whole background once
self.setupBackgroundDisplay()
self.staticGroup.draw(self.canvas)
pygame.display.update()
def clearBackground(self):
self.staticGroup.empty()
def takeDownDisplay(self):
pygame.quit()
def showReadyScreen(self):
""" Display an initial greeting screen """
self.createReadyText()
self.displayQrCode(self.arriveQr, "Scan to Start Challenge")
def showWelcomeScreen(self, teamName):
""" Display the team welcome screen """
self.createWelcomeText(name=teamName)
self.displayQrCode(self.dockQr, "Scan to Initiate Dock Procedure")
def outcomeMessage(self, state):
return self.OUTCOMES[self.dockSim.outcome(state)] # get failure (or success) message
def reportPassFail(self, passed, simTime, msg):
""" Report back to the station framework that the sim is finished
and pass it the pass/fail status and the simulation elapsed time.
Returns:
A string stating success or the reason for failure
"""
if self.stationCallbackObj:
self.stationCallbackObj.args = (str(passed), str(simTime), msg)
self.stationCallbackObj.State = State.PROCESSING_COMPLETED
return msg
def countDown(self):
""" Display a 3...2...1 countdown """
GIANT_TEXT = 900
text3 = Text(self.SCREEN_CENTER,
value="3",
size=GIANT_TEXT,
color=Colors.ORANGE,
justify=Text.CENTER|Text.MIDDLE,
intervalsMs=(0,1000, 1000,5000))
text2 = Text(self.SCREEN_CENTER,
value="2",
size=GIANT_TEXT,
color=Colors.LIGHT_ORANGE,
justify=Text.CENTER|Text.MIDDLE,
intervalsMs=(0,2000, 1000,5000))
text1 = Text(self.SCREEN_CENTER,
value="1",
size=GIANT_TEXT,
color=Colors.WHITE,
justify=Text.CENTER|Text.MIDDLE,
intervalsMs=(0,3000, 1000,5000))
self.blinkingTextGroup.add((text3, text2, text1))
self.timer.start()
while self.timer.elapsedSec() < 4.0:
self.updateBlinkingText()
self.draw()
self.frameClock.tick(self.frameRate)
def run(self, flightProfile):
self.setFlightProfile(flightProfile)
self.initPygame()
self.initScreen()
self.loadImageObjects()
self.setupDisplay()
self.mainLoop()
self.takeDownDisplay()
def updateBlinkingText(self):
for sp in self.blinkingTextGroup.sprites():
sp.update()
def runFromQueue(self, queue):
""" This method is called to start the sim as a separate process.
Work will be passed to the process in the queue. When "QUIT"
is received, the process will shut down.
"""
self.workQueue = queue
self.initPygame()
self.initScreen()
self.loadImageObjects()
self.setupBackgroundDisplay()
# self.createReadyText()
self.showReadyScreen()
updateProc = self.updateBlinkingText
done = False
while not done:
cmd = None
while cmd is None:
try:
cmd,args = self.workQueue.get_nowait()
#print("Got work ({},{})".format(repr(cmd), repr(args)))
except Queue.Empty:
pass
updateProc()
self.draw()
if self.userQuit():
cmd = self.QUIT_CMD
break
self.frameClock.tick(self.frameRate)
self.clearDisplay()
if cmd == self.RUN_CMD:
#print("RUN_CMD")
self.countDown() # blocks for 3 seconds while it counts down
self.clearDisplay()
self.setFlightProfile(args)
self.setupMissionTimeDisplay()
self.setupStatsDisplay()
self.setupSpaceshipDisplay()
self.timer.start()
updateProc = self.update
#self.processLoop() # stay in processLoop() until sim is complete
elif cmd == self.READY_CMD:
#print("READY_CMD")
# self.createReadyText()
self.showReadyScreen()
updateProc = self.updateBlinkingText
elif cmd == self.WELCOME_CMD:
#print("WELCOME_CMD")
# self.createWelcomeText(name=args)
self.showWelcomeScreen(teamName=args)
updateProc = self.updateBlinkingText
elif cmd == self.KIOSK_CMD:
self.createKioskScreen(args)
updateProc = self.updateBlinkingText
elif cmd == self.QUIT_CMD:
#print("QUIT_CMD")
self.clearBackground()
self.takeDownDisplay()
done = True
#============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-f", "--fullscreen", action="store_true", help="display graphics in 1920x1080 fullscreen mode")
parser.add_argument("--tAft", type=float, required=True, help="duration of acceleration burn phase, in sec")
parser.add_argument("--tCoast", type=float, required=True, help="duration of coast phase, in sec")
parser.add_argument("--tFore", type=float, required=True, help="duration of deceleration burn phase, in sec")
parser.add_argument("--aAft", type=float, required=True, help="acceleration force, in m/sec^2")
parser.add_argument("--aFore", type=float, required=True, help="deceleration force, in m/sec^2")
parser.add_argument("--rFuel", type=float, required=True, help="rate of fuel consumption, in kg/sec")
parser.add_argument("--qFuel", type=float, required=True, help="initial fuel amount, in kg")
parser.add_argument("--dist", type=float, required=True, help="initial dock distance, in m")
parser.add_argument("--vMin", type=float, default=DockSim.MIN_V_DOCK, help="minimum velocity for successfull doc, in m/sec")
parser.add_argument("--vMax", type=float, default=DockSim.MAX_V_DOCK, help="maximum velocity for successfull doc, in m/sec")
parser.add_argument("--vInit", type=float, default=DockSim.INITIAL_V, help="initial velocity, in m/sec")
parser.add_argument("--tSim", type=int, default=FlightProfileApp.MAX_SIM_DURATION_S, help="max simulation time, in sec")
args = parser.parse_args()
flightProfile = FlightParams(tAft=args.tAft,
tCoast=args.tCoast,
tFore=args.tFore,
aAft=args.aAft,
aFore=args.aFore,
rFuel=args.rFuel,
qFuel=args.qFuel,
dist=args.dist,
vMin=args.vMin,
vMax=args.vMax,
vInit=args.vInit,
tSim=args.tSim,
)
app = FlightProfileApp()
app.fullscreen = args.fullscreen
app.run(flightProfile)
sys.exit(0)
"""Command line functions for calling the root mfa command"""
from __future__ import annotations
import argparse
import atexit
import multiprocessing as mp
import sys
import time
from datetime import datetime
from typing import TYPE_CHECKING
from montreal_forced_aligner.command_line.adapt import run_adapt_model
from montreal_forced_aligner.command_line.align import run_align_corpus
from montreal_forced_aligner.command_line.anchor import run_anchor
from montreal_forced_aligner.command_line.classify_speakers import run_classify_speakers
from montreal_forced_aligner.command_line.create_segments import run_create_segments
from montreal_forced_aligner.command_line.g2p import run_g2p
from montreal_forced_aligner.command_line.model import run_model
from montreal_forced_aligner.command_line.train_acoustic_model import run_train_acoustic_model
from montreal_forced_aligner.command_line.train_dictionary import run_train_dictionary
from montreal_forced_aligner.command_line.train_g2p import run_train_g2p
from montreal_forced_aligner.command_line.train_ivector_extractor import (
run_train_ivector_extractor,
)
from montreal_forced_aligner.command_line.train_lm import run_train_lm
from montreal_forced_aligner.command_line.transcribe import run_transcribe_corpus
from montreal_forced_aligner.command_line.validate import run_validate_corpus
from montreal_forced_aligner.config import (
load_command_history,
load_global_config,
update_command_history,
update_global_config,
)
from montreal_forced_aligner.exceptions import MFAError
from montreal_forced_aligner.models import MODEL_TYPES
from montreal_forced_aligner.utils import check_third_party
if TYPE_CHECKING:
from argparse import ArgumentParser
BEGIN = time.time()
BEGIN_DATE = datetime.now()
__all__ = ["ExitHooks", "create_parser", "main"]
class ExitHooks(object):
"""
Class for capturing exit information for MFA commands
"""
def __init__(self):
self.exit_code = None
self.exception = None
def hook(self):
"""Hook for capturing information about exit code and exceptions"""
self._orig_exit = sys.exit
sys.exit = self.exit
sys.excepthook = self.exc_handler
def exit(self, code=0):
"""Actual exit for the program"""
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exc_type, exc, *args):
"""Handle and save exceptions"""
self.exception = exc
self.exit_code = 1
def history_save_handler(self) -> None:
"""
Handler for saving history on exit. In addition to the command run, also saves exit code, whether
an exception was encountered, when the command was executed, and how long it took to run
"""
from montreal_forced_aligner.utils import get_mfa_version
history_data = {
"command": " ".join(sys.argv),
"execution_time": time.time() - BEGIN,
"date": BEGIN_DATE,
"version": get_mfa_version(),
}
if self.exit_code is not None:
history_data["exit_code"] = self.exit_code
history_data["exception"] = ""
elif self.exception is not None:
history_data["exit_code"] = 1
history_data["exception"] = str(self.exception)
else:
history_data["exception"] = ""
history_data["exit_code"] = 0
update_command_history(history_data)
if self.exception:
raise self.exception
def create_parser() -> ArgumentParser:
"""
Constructs the MFA argument parser
Returns
-------
:class:`~argparse.ArgumentParser`
MFA argument parser
"""
GLOBAL_CONFIG = load_global_config()
def add_global_options(subparser: argparse.ArgumentParser, textgrid_output: bool = False):
"""
Add a set of global options to a subparser
Parameters
----------
subparser: :class:`~argparse.ArgumentParser`
Subparser to augment
textgrid_output: bool
Flag for whether the subparser is used for a command that generates TextGrids
"""
subparser.add_argument(
"-t",
"--temp_directory",
"--temporary_directory",
dest="temporary_directory",
type=str,
default=GLOBAL_CONFIG["temporary_directory"],
help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG['temporary_directory']}",
)
subparser.add_argument(
"--disable_mp",
help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG['use_mp']}",
action="store_true",
default=not GLOBAL_CONFIG["use_mp"],
)
subparser.add_argument(
"-j",
"--num_jobs",
type=int,
default=GLOBAL_CONFIG["num_jobs"],
help=f"Number of data splits (and cores to use if multiprocessing is enabled), defaults "
f"is {GLOBAL_CONFIG['num_jobs']}",
)
subparser.add_argument(
"-v",
"--verbose",
help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}",
action="store_true",
default=GLOBAL_CONFIG["verbose"],
)
subparser.add_argument(
"--clean",
help=f"Remove files from previous runs, default is {GLOBAL_CONFIG['clean']}",
action="store_true",
default=GLOBAL_CONFIG["clean"],
)
subparser.add_argument(
"--overwrite",
help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG['overwrite']}",
action="store_true",
default=GLOBAL_CONFIG["overwrite"],
)
subparser.add_argument(
"--debug",
help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG['debug']}",
action="store_true",
default=GLOBAL_CONFIG["debug"],
)
if textgrid_output:
subparser.add_argument(
"--disable_textgrid_cleanup",
help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG['cleanup_textgrids']}",
action="store_true",
default=not GLOBAL_CONFIG["cleanup_textgrids"],
)
pretrained_acoustic = ", ".join(MODEL_TYPES["acoustic"].get_available_models())
if not pretrained_acoustic:
pretrained_acoustic = (
"you can use ``mfa model download acoustic`` to get pretrained MFA models"
)
pretrained_ivector = ", ".join(MODEL_TYPES["ivector"].get_available_models())
if not pretrained_ivector:
pretrained_ivector = (
"you can use ``mfa model download ivector`` to get pretrained MFA models"
)
pretrained_g2p = ", ".join(MODEL_TYPES["g2p"].get_available_models())
if not pretrained_g2p:
pretrained_g2p = "you can use ``mfa model download g2p`` to get pretrained MFA models"
pretrained_lm = ", ".join(MODEL_TYPES["language_model"].get_available_models())
if not pretrained_lm:
pretrained_lm = (
"you can use ``mfa model download language_model`` to get pretrained MFA models"
)
pretrained_dictionary = ", ".join(MODEL_TYPES["dictionary"].get_available_models())
if not pretrained_dictionary:
pretrained_dictionary = (
"you can use ``mfa model download dictionary`` to get MFA dictionaries"
)
dictionary_path_help = f"Full path to pronunciation dictionary, or saved dictionary name ({pretrained_dictionary})"
acoustic_model_path_help = (
f"Full path to pre-trained acoustic model, or saved model name ({pretrained_acoustic})"
)
language_model_path_help = (
f"Full path to pre-trained language model, or saved model name ({pretrained_lm})"
)
ivector_model_path_help = f"Full path to pre-trained ivector extractor model, or saved model name ({pretrained_ivector})"
g2p_model_path_help = (
f"Full path to pre-trained G2P model, or saved model name ({pretrained_g2p}). "
"If not specified, then orthographic transcription is split into pronunciations."
)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand")
subparsers.required = True
_ = subparsers.add_parser("version")
align_parser = subparsers.add_parser(
"align", help="Align a corpus with a pretrained acoustic model"
)
align_parser.add_argument("corpus_directory", help="Full path to the directory to align")
align_parser.add_argument(
"dictionary_path",
help=dictionary_path_help,
type=str,
)
align_parser.add_argument(
"acoustic_model_path",
type=str,
help=acoustic_model_path_help,
)
align_parser.add_argument(
"output_directory",
type=str,
help="Full path to output directory, will be created if it doesn't exist",
)
align_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
align_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
align_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
align_parser.add_argument(
"--reference_directory",
type=str,
default="",
help="Directory containing gold standard alignments to evaluate",
)
align_parser.add_argument(
"--custom_mapping_path",
type=str,
default="",
help="YAML file for mapping phones across phone sets in evaluations",
)
add_global_options(align_parser, textgrid_output=True)
adapt_parser = subparsers.add_parser("adapt", help="Adapt an acoustic model to a new corpus")
adapt_parser.add_argument("corpus_directory", help="Full path to the directory to align")
adapt_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help)
adapt_parser.add_argument(
"acoustic_model_path",
type=str,
help=acoustic_model_path_help,
)
adapt_parser.add_argument(
"output_paths",
type=str,
nargs="+",
help="Path to save the new acoustic model, path to export aligned TextGrids, or both",
)
adapt_parser.add_argument(
"-o",
"--output_model_path",
type=str,
default="",
help="Full path to save adapted acoustic model",
)
adapt_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
adapt_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
adapt_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
add_global_options(adapt_parser, textgrid_output=True)
train_parser = subparsers.add_parser(
"train", help="Train a new acoustic model on a corpus and optionally export alignments"
)
train_parser.add_argument(
"corpus_directory", type=str, help="Full path to the source directory to align"
)
train_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help, default="")
train_parser.add_argument(
"output_paths",
type=str,
nargs="+",
help="Path to save the new acoustic model, path to export aligned TextGrids, or both",
)
train_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for training and alignment",
)
train_parser.add_argument(
"-o",
"--output_model_path",
type=str,
default="",
help="Full path to save resulting acoustic model",
)
train_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of filenames to use for determining speaker, "
"default is to use directory names",
)
train_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
train_parser.add_argument(
"--phone_set",
dest="phone_set_type",
type=str,
help="Enable extra decision tree modeling based on the phone set",
default="UNKNOWN",
choices=["AUTO", "IPA", "ARPA", "PINYIN"],
)
add_global_options(train_parser, textgrid_output=True)
validate_parser = subparsers.add_parser("validate", help="Validate a corpus for use in MFA")
validate_parser.add_argument(
"corpus_directory", type=str, help="Full path to the source directory to align"
)
validate_parser.add_argument(
"dictionary_path", type=str, help=dictionary_path_help, default=""
)
validate_parser.add_argument(
"acoustic_model_path",
type=str,
nargs="?",
default="",
help=acoustic_model_path_help,
)
validate_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
validate_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for training and alignment",
)
validate_parser.add_argument(
"--test_transcriptions", help="Test accuracy of transcriptions", action="store_true"
)
validate_parser.add_argument(
"--ignore_acoustics",
"--skip_acoustics",
dest="ignore_acoustics",
help="Skip acoustic feature generation and associated validation",
action="store_true",
)
validate_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
validate_parser.add_argument(
"--phone_set",
dest="phone_set_type",
type=str,
help="Enable extra decision tree modeling based on the phone set",
default="UNKNOWN",
choices=["AUTO", "IPA", "ARPA", "PINYIN"],
)
add_global_options(validate_parser)
g2p_parser = subparsers.add_parser(
"g2p", help="Generate a pronunciation dictionary using a G2P model"
)
g2p_parser.add_argument(
"g2p_model_path",
help=g2p_model_path_help,
type=str,
nargs="?",
)
g2p_parser.add_argument(
"input_path",
type=str,
help="Corpus to base word list on or a text file of words to generate pronunciations",
)
g2p_parser.add_argument("output_path", type=str, help="Path to save output dictionary")
g2p_parser.add_argument(
"--include_bracketed",
help="Included words enclosed by brackets, job_name.e. [...], (...), <...>",
action="store_true",
)
g2p_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for G2P"
)
add_global_options(g2p_parser)
train_g2p_parser = subparsers.add_parser(
"train_g2p", help="Train a G2P model from a pronunciation dictionary"
)
train_g2p_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help)
train_g2p_parser.add_argument(
"output_model_path", type=str, help="Desired location of generated model"
)
train_g2p_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for G2P"
)
train_g2p_parser.add_argument(
"--evaluate",
"--validate",
dest="evaluation_mode",
action="store_true",
help="Perform an analysis of accuracy training on "
"most of the data and validating on an unseen subset",
)
add_global_options(train_g2p_parser)
help_message = "Inspect, download, and save pretrained MFA models"
model_parser = subparsers.add_parser(
"model", aliases=["models"], description=help_message, help=help_message
)
model_subparsers = model_parser.add_subparsers(dest="action")
model_subparsers.required = True
help_message = "Download a pretrained model from the MFA repository"
model_download_parser = model_subparsers.add_parser(
"download", description=help_message, help=help_message
)
model_download_parser.add_argument(
"model_type", choices=sorted(MODEL_TYPES), help="Type of model to download"
)
model_download_parser.add_argument(
"name",
help="Name of language code to download, if not specified, "
"will list all available languages",
type=str,
nargs="?",
)
help_message = "List of saved models"
model_list_parser = model_subparsers.add_parser(
"list", description=help_message, help=help_message
)
model_list_parser.add_argument(
"model_type",
choices=sorted(MODEL_TYPES),
type=str,
nargs="?",
help="Type of model to list",
)
help_message = "Inspect a model and output its metadata"
model_inspect_parser = model_subparsers.add_parser(
"inspect", description=help_message, help=help_message
)
model_inspect_parser.add_argument(
"model_type",
choices=sorted(MODEL_TYPES),
type=str,
nargs="?",
help="Type of model to download",
)
model_inspect_parser.add_argument(
"name", type=str, help="Name of pretrained model or path to MFA model to inspect"
)
help_message = "Save a MFA model to the pretrained directory for name-based referencing"
model_save_parser = model_subparsers.add_parser(
"save", description=help_message, help=help_message
)
model_save_parser.add_argument(
"model_type", type=str, choices=sorted(MODEL_TYPES), help="Type of MFA model"
)
model_save_parser.add_argument(
"path", help="Path to MFA model to save for invoking with just its name"
)
model_save_parser.add_argument(
"--name",
help="Name to use as reference (defaults to the name of the zip file",
type=str,
default="",
)
model_save_parser.add_argument(
"--overwrite",
help="Flag to overwrite existing pretrained models with the same name (and model type)",
action="store_true",
)
train_lm_parser = subparsers.add_parser(
"train_lm", help="Train a language model from a corpus"
)
train_lm_parser.add_argument(
"source_path",
type=str,
help="Full path to the source directory to train from, alternatively "
"an ARPA format language model to convert for MFA use",
)
train_lm_parser.add_argument(
"output_model_path", type=str, help="Full path to save resulting language model"
)
train_lm_parser.add_argument(
"-m",
"--model_path",
type=str,
help="Full path to existing language model to merge probabilities",
)
train_lm_parser.add_argument(
"-w",
"--model_weight",
type=float,
default=1.0,
help="Weight factor for supplemental language model, defaults to 1.0",
)
train_lm_parser.add_argument(
"--dictionary_path", type=str, help=dictionary_path_help, default=""
)
train_lm_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for training and alignment",
)
add_global_options(train_lm_parser)
train_dictionary_parser = subparsers.add_parser(
"train_dictionary",
help="Calculate pronunciation probabilities for a dictionary based on alignment results in a corpus",
)
train_dictionary_parser.add_argument(
"corpus_directory", help="Full path to the directory to align"
)
train_dictionary_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help)
train_dictionary_parser.add_argument(
"acoustic_model_path",
type=str,
help=acoustic_model_path_help,
)
train_dictionary_parser.add_argument(
"output_directory",
type=str,
help="Full path to output directory, will be created if it doesn't exist",
)
train_dictionary_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
train_dictionary_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
add_global_options(train_dictionary_parser)
train_ivector_parser = subparsers.add_parser(
"train_ivector",
help="Train an ivector extractor from a corpus and pretrained acoustic model",
)
train_ivector_parser.add_argument(
"corpus_directory",
type=str,
help="Full path to the source directory to train the ivector extractor",
)
train_ivector_parser.add_argument(
"output_model_path",
type=str,
help="Full path to save resulting ivector extractor",
)
train_ivector_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of filenames to use for determining speaker, "
"default is to use directory names",
)
train_ivector_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for training"
)
add_global_options(train_ivector_parser)
classify_speakers_parser = subparsers.add_parser(
"classify_speakers", help="Use an ivector extractor to cluster utterances into speakers"
)
classify_speakers_parser.add_argument(
"corpus_directory",
type=str,
help="Full path to the source directory to run speaker classification",
)
classify_speakers_parser.add_argument(
"ivector_extractor_path", type=str, default="", help=ivector_model_path_help
)
classify_speakers_parser.add_argument(
"output_directory",
type=str,
help="Full path to output directory, will be created if it doesn't exist",
)
classify_speakers_parser.add_argument(
"-s", "--num_speakers", type=int, default=0, help="Number of speakers if known"
)
classify_speakers_parser.add_argument(
"--cluster", help="Using clustering instead of classification", action="store_true"
)
classify_speakers_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for ivector extraction",
)
add_global_options(classify_speakers_parser)
create_segments_parser = subparsers.add_parser(
"create_segments", help="Create segments based on voice activity dectection (VAD)"
)
create_segments_parser.add_argument(
"corpus_directory", help="Full path to the source directory to run VAD segmentation"
)
create_segments_parser.add_argument(
"output_directory",
type=str,
help="Full path to output directory, will be created if it doesn't exist",
)
create_segments_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for segmentation"
)
add_global_options(create_segments_parser)
transcribe_parser = subparsers.add_parser(
"transcribe",
help="Transcribe utterances using an acoustic model, language model, and pronunciation dictionary",
)
transcribe_parser.add_argument(
"corpus_directory", type=str, help="Full path to the directory to transcribe"
)
transcribe_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help)
transcribe_parser.add_argument(
"acoustic_model_path",
type=str,
help=acoustic_model_path_help,
)
transcribe_parser.add_argument(
"language_model_path",
type=str,
help=language_model_path_help,
)
transcribe_parser.add_argument(
"output_directory",
type=str,
help="Full path to output directory, will be created if it doesn't exist",
)
transcribe_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for transcription"
)
transcribe_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
transcribe_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
transcribe_parser.add_argument(
"-e",
"--evaluate",
dest="evaluation_mode",
help="Evaluate the transcription against golden texts",
action="store_true",
)
add_global_options(transcribe_parser)
config_parser = subparsers.add_parser(
"configure",
help="The configure command is used to set global defaults for MFA so "
"you don't have to set them every time you call an MFA command.",
)
config_parser.add_argument(
"-t",
"--temp_directory",
"--temporary_directory",
dest="temporary_directory",
type=str,
default="",
help=f"Set the default temporary directory, default is {GLOBAL_CONFIG['temporary_directory']}",
)
config_parser.add_argument(
"-j",
"--num_jobs",
type=int,
help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG['num_jobs']}",
)
config_parser.add_argument(
"--always_clean",
help="Always remove files from previous runs by default",
action="store_true",
)
config_parser.add_argument(
"--never_clean",
help="Don't remove files from previous runs by default",
action="store_true",
)
config_parser.add_argument(
"--always_verbose", help="Default to verbose output", action="store_true"
)
config_parser.add_argument(
"--never_verbose", help="Default to non-verbose output", action="store_true"
)
config_parser.add_argument(
"--always_debug", help="Default to running debugging steps", action="store_true"
)
config_parser.add_argument(
"--never_debug", help="Default to not running debugging steps", action="store_true"
)
config_parser.add_argument(
"--always_overwrite", help="Always overwrite output files", action="store_true"
)
config_parser.add_argument(
"--never_overwrite",
help="Never overwrite output files (if file already exists, "
"the output will be saved in the temp directory)",
action="store_true",
)
config_parser.add_argument(
"--disable_mp",
help="Disable all multiprocessing (not recommended as it will usually "
"increase processing times)",
action="store_true",
)
config_parser.add_argument(
"--enable_mp",
help="Enable multiprocessing (recommended and enabled by default)",
action="store_true",
)
config_parser.add_argument(
"--disable_textgrid_cleanup",
help="Disable postprocessing of TextGrids that cleans up "
"silences and recombines compound words and clitics",
action="store_true",
)
config_parser.add_argument(
"--enable_textgrid_cleanup",
help="Enable postprocessing of TextGrids that cleans up "
"silences and recombines compound words and clitics",
action="store_true",
)
config_parser.add_argument(
"--disable_detect_phone_set",
help="Disable auto-detecting phone sets from the dictionary during training",
action="store_true",
)
config_parser.add_argument(
"--enable_detect_phone_set",
help="Enable auto-detecting phone sets from the dictionary during training",
action="store_true",
)
config_parser.add_argument(
"--disable_terminal_colors", help="Turn off colored text in output", action="store_true"
)
config_parser.add_argument(
"--enable_terminal_colors", help="Turn on colored text in output", action="store_true"
)
config_parser.add_argument(
"--terminal_width",
help=f"Set width of terminal output, "
f"currently set to {GLOBAL_CONFIG['terminal_width']}",
default=GLOBAL_CONFIG["terminal_width"],
type=int,
)
config_parser.add_argument(
"--blas_num_threads",
help=f"Number of threads to use for BLAS libraries, 1 is recommended "
f"due to how much MFA relies on multiprocessing. "
f"Currently set to {GLOBAL_CONFIG['blas_num_threads']}",
default=GLOBAL_CONFIG["blas_num_threads"],
type=int,
)
history_parser = subparsers.add_parser("history", help="Show previously run mfa commands")
_ = subparsers.add_parser("thirdparty", help="DEPRECATED: Please install Kaldi via conda.")
_ = subparsers.add_parser(
"download", help="DEPRECATED: Please use mfa model download instead."
)
history_parser.add_argument(
"depth", type=int, help="Number of commands to list", nargs="?", default=10
)
history_parser.add_argument(
"-v",
"--verbose",
help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}",
action="store_true",
)
_ = subparsers.add_parser(
"anchor", aliases=["annotator"], help="Launch Anchor Annotator (if installed)"
)
return parser
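# Quick sanity check of the parser (hypothetical paths; 'align' expects
# corpus_directory, dictionary_path, acoustic_model_path, output_directory):
#   args, unknown = create_parser().parse_known_args(
#       ["align", "~/corpus", "english.dict", "english", "~/aligned"])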
parser = create_parser()
def print_history(args):
depth = args.depth
history = load_command_history()[-depth:]
if args.verbose:
print("command\tDate\tExecution time\tVersion\tExit code\tException")
for h in history:
execution_time = time.strftime("%H:%M:%S", time.gmtime(h["execution_time"]))
d = h["date"].isoformat()
print(
f"{h['command']}\t{d}\t{execution_time}\t{h['version']}\t{h['exit_code']}\t{h['exception']}"
)
pass
else:
for h in history:
print(h["command"])
def main() -> None:
"""
Main function for the MFA command line interface
"""
check_third_party()
hooks = ExitHooks()
hooks.hook()
atexit.register(hooks.history_save_handler)
from colorama import init
init()
parser = create_parser()
mp.freeze_support()
args, unknown = parser.parse_known_args()
for short in ["-c", "-d"]:
if short in unknown:
print(
f"Due to the number of options that `{short}` could refer to, it is not accepted. "
"Please specify the full argument",
file=sys.stderr,
)
sys.exit(1)
try:
if args.subcommand in ["g2p", "train_g2p"]:
try:
import pynini # noqa
except ImportError:
print(
"There was an issue importing Pynini, please ensure that it is installed. If you are on Windows, "
"please use the Windows Subsystem for Linux to use g2p functionality.",
file=sys.stderr,
)
sys.exit(1)
if args.subcommand == "align":
run_align_corpus(args, unknown)
elif args.subcommand == "adapt":
run_adapt_model(args, unknown)
elif args.subcommand == "train":
run_train_acoustic_model(args, unknown)
elif args.subcommand == "g2p":
run_g2p(args, unknown)
elif args.subcommand == "train_g2p":
run_train_g2p(args, unknown)
elif args.subcommand == "validate":
run_validate_corpus(args, unknown)
elif args.subcommand in ["model", "models"]:
run_model(args)
elif args.subcommand == "train_lm":
run_train_lm(args, unknown)
elif args.subcommand == "train_dictionary":
run_train_dictionary(args, unknown)
elif args.subcommand == "train_ivector":
run_train_ivector_extractor(args, unknown)
elif args.subcommand == "classify_speakers": # pragma: no cover
run_classify_speakers(args, unknown)
elif args.subcommand in ["annotator", "anchor"]:
run_anchor()
elif args.subcommand == "transcribe":
run_transcribe_corpus(args, unknown)
elif args.subcommand == "create_segments":
run_create_segments(args, unknown)
elif args.subcommand == "configure":
update_global_config(args)
global GLOBAL_CONFIG
GLOBAL_CONFIG = load_global_config()
elif args.subcommand == "history":
print_history(args)
elif args.subcommand == "version":
from montreal_forced_aligner.utils import get_mfa_version
print(get_mfa_version())
elif args.subcommand == "thirdparty": # Deprecated command
raise DeprecationWarning(
"Necessary thirdparty executables are now installed via conda. Please refer to the installation docs for the updated commands."
)
elif args.subcommand == "download": # Deprecated command
raise DeprecationWarning(
"Downloading models is now run through the `mfa model download` command, please use that instead."
)
except MFAError as e:
if getattr(args, "debug", False):
raise
print(e, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
import warnings
warnings.warn(
"Use 'python -m montreal_forced_aligner', not 'python -m montreal_forced_aligner.command_line.mfa'",
DeprecationWarning,
)
main()
''' Create rst documentation of the examples directory.
This uses screenshots in the screenshots_dir
(currently doc/sources/images/examples) along with source code and files
in the examples/ directory to create rst files in the generation_dir
(doc/sources/examples) gallery.rst, index.rst, and gen__*.rst
'''
import os
import re
from os.path import sep
from os.path import join as slash  # we just like that name better
from kivy.logger import Logger
import textwrap
base_dir = '..' # from here to the kivy top
examples_dir = slash(base_dir, 'examples')
screenshots_dir = slash(base_dir, 'doc/sources/images/examples')
generation_dir = slash(base_dir, 'doc/sources/examples')
image_dir = "../images/examples/" # relative to generation_dir
gallery_filename = slash(generation_dir, 'gallery.rst')
# Info is a dict built up from
# straight filename information, more from reading the docstring,
# and more from parsing the description text. Errors are often
# shown by setting the key 'error' with the value being the error message.
#
# It doesn't quite meet the requirements for a class, but is a vocabulary
# word in this module.
def iter_filename_info(dir_name):
"""
Yield info (dict) of each matching screenshot found walking the
directory dir_name. A matching screenshot uses double underscores to
separate fields, i.e. path__to__filename__py.png as the screenshot for
examples/path/to/filename.py.
Files not ending with .png are ignored, others are either parsed or
yield an error.
Info fields 'dunder', 'dir', 'file', 'ext', 'source' if not 'error'
"""
pattern = re.compile(r'^((.+)__(.+)__([^-]+))\.png')
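    # For example (hypothetical screenshot), 'widgets__label__mipmap__py.png'
    # parses to:
    #   {'dunder': 'widgets__label__mipmap__py', 'dir': 'widgets/label',
    #    'file': 'mipmap', 'ext': 'py', 'source': 'widgets/label/mipmap.py'}
    # ('dir' and 'source' use os.sep; shown here for a POSIX system)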
for t in os.walk(dir_name):
for filename in t[2]:
if filename.endswith('.png'):
m = pattern.match(filename)
if m is None:
yield {'error': 'png filename not following screenshot'
' pattern: {}'.format(filename)}
else:
d = m.group(2).replace('__', sep)
yield {'dunder': m.group(1),
'dir': d,
'file': m.group(3),
'ext': m.group(4),
'source': slash(d, m.group(3) + '.' + m.group(4))
}
def parse_docstring_info(text):
''' parse docstring from text (normal string with '\n's) and return an info
    dict. A docstring should be the first triple-quoted string, have a title
    followed by a line of equal signs, and then a description at
    least one sentence long.
    fields are 'docstring', 'title', 'description', and 'first_sentence'
    if not 'error'. 'first_sentence' is a single line without newlines.
'''
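    # A matching module docstring looks like this (illustrative):
    #     '''
    #     Suite Demo
    #     ==========
    #     Shows one feature. This first sentence becomes 'first_sentence'.
    #     '''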
q = '\"\"\"|\'\'\''
p = r'({})\s+([^\n]+)\s+\=+\s+(.*?)(\1)'.format(q)
m = re.search(p, text, re.S)
if m:
comment = m.group(3).replace('\n', ' ')
first_sentence = comment[:comment.find('.') + 1]
return {'docstring': m.group(0), 'title': m.group(2),
'description': m.group(3), 'first_sentence': first_sentence}
else:
return {'error': 'Did not find docstring with title at top of file.'}
def iter_docstring_info(dir_name):
''' Iterate over screenshots in directory, yield info from the file
name and initial parse of the docstring. Errors are logged, but
files with errors are skipped.
'''
for file_info in iter_filename_info(dir_name):
if 'error' in file_info:
Logger.error(file_info['error'])
continue
source = slash(examples_dir, file_info['dir'],
file_info['file'] + '.' + file_info['ext'])
if not os.path.exists(source):
Logger.error('Screen shot references source code that does '
'not exist: %s', source)
continue
with open(source) as f:
text = f.read()
docstring_info = parse_docstring_info(text)
if 'error' in docstring_info:
Logger.error(docstring_info['error'] + ' File: ' + source)
continue # don't want to show ugly entries
else:
file_info.update(docstring_info)
yield file_info
def enhance_info_description(info, line_length=50):
''' Using the info['description'], add fields to info.
info['files'] is the source filename and any filenames referenced by the
magic words in the description, e.g. 'the file xxx.py' or
'The image this.png'. These are as written in the description, do
not allow ../dir notation, and are relative to the source directory.
info['enhanced_description'] is the description, as an array of
paragraphs where each paragraph is an array of lines wrapped to width
line_length. This enhanced description include the rst links to
the files of info['files'].
'''
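    # e.g. a description containing "the file pictures.kv" (hypothetical name,
    # for a source under demo/pictures/) appends 'pictures.kv' to info['files']
    # and rewrites the mention as an rst link like
    # :ref:`pictures.kv <demo_pictures_pictures.kv>`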
# make text a set of long lines, one per paragraph.
paragraphs = info['description'].split('\n\n')
lines = [paragraph.replace('\n', ' ') for paragraph in paragraphs]
text = '\n'.join(lines)
info['files'] = [info['file'] + '.' + info['ext']]
regex = r'[tT]he (?:file|image) ([\w\/]+\.\w+)'
for name in re.findall(regex, text):
if name not in info['files']:
info['files'].append(name)
# add links where the files are referenced
folder = '_'.join(info['source'].split(sep)[:-1]) + '_'
text = re.sub(r'([tT]he (?:file|image) )([\w\/]+\.\w+)',
r'\1:ref:`\2 <$folder$\2>`', text)
text = text.replace('$folder$', folder)
# now break up text into array of paragraphs, each an array of lines.
lines = text.split('\n')
paragraphs = [textwrap.wrap(line, line_length) for line in lines]
info['enhanced_description'] = paragraphs
def get_infos(dir_name):
    ''' return infos, an array of info dicts for each matching screenshot in
    the dir, sorted by source file name, adding the field 'num' as the unique
    order in this array of dicts.
'''
infos = [i for i in iter_docstring_info(dir_name)]
infos.sort(key=lambda x: x['source'])
for num, info in enumerate(infos):
info['num'] = num
enhance_info_description(info)
return infos
def make_gallery_page(infos):
''' return string of the rst (Restructured Text) of the gallery page,
showing information on all screenshots found.
'''
def a(s=''):
''' append formatted s to output, which will be joined into lines '''
output.append(s.format(**info))
def t(left='', right=''):
''' append left and right format strings into a table line. '''
l = left.format(**info)
r = right.format(**info)
if len(l) > width1 or len(r) > width2:
            Logger.error('items too wide for generated table: "%s" and "%s"',
                         l, r)
return
output.append('| {0:{w1}} | {1:{w2}} |'
.format(l, r, w1=width1, w2=width2))
gallery_top = '''
Gallery
-------
.. _Tutorials: ../tutorials-index.html
.. container:: title
This gallery lets you explore the many examples included with Kivy.
Click on any screenshot to see the code.
This gallery contains:
* Examples from the examples/ directory that show specific capabilities of
different libraries and features of Kivy.
* Demonstrations from the examples/demos/ directory that explore many of
Kivy's abilities.
There are more Kivy programs elsewhere:
* Tutorials_ walks through the development of complete Kivy applications.
* Unit tests found in the source code under the subdirectory kivy/tests/
can also be useful.
We hope your journey into learning Kivy is exciting and fun!
'''
output = [gallery_top]
for info in infos:
a("\n.. |link{num}| replace:: :doc:`{source}<gen__{dunder}>`")
a("\n.. |pic{num}| image:: ../images/examples/{dunder}.png"
"\n :width: 216pt"
"\n :align: middle"
"\n :target: gen__{dunder}.html")
a("\n.. |title{num}| replace:: **{title}**")
# write the table
width1, width2 = 20, 50 # not including two end spaces
head = '+-' + '-' * width1 + '-+-' + '-' * width2 + '-+'
a()
a(head)
for info in infos:
t('| |pic{num}|', '| |title{num}|')
t('| |link{num}|', '')
paragraphs = info['description'].split("\n\n")
for p in paragraphs:
for line in textwrap.wrap(p, width2):
t('', line)
t() # line between paragraphs
t()
a(head)
return "\n".join(output) + "\n"
def make_detail_page(info):
''' return str of the rst text for the detail page of the file in info. '''
def a(s=''):
''' append formatted s to output, which will be joined into lines '''
output.append(s.format(**info))
output = []
a('{title}')
a('=' * len(info['title']))
a('\n.. |pic{num}| image:: /images/examples/{dunder}.png'
'\n :width: 50%'
'\n :align: middle')
a('\n|pic{num}|')
a()
for paragraph in info['enhanced_description']:
for line in paragraph:
a(line)
a()
    # include the source code and any referenced images
    last_lang = '.py'
for fname in info['files']:
full_name = slash(info['dir'], fname)
ext = re.search(r'\.\w+$', fname).group(0)
a('\n.. _`' + full_name.replace(sep, '_') + '`:')
# double separator if building on windows (sphinx skips backslash)
if '\\' in full_name:
full_name = full_name.replace(sep, sep*2)
if ext in ['.png', '.jpg', '.jpeg']:
title = 'Image **' + full_name + '**'
a('\n' + title)
a('~' * len(title))
a('\n.. image:: ../../../examples/' + full_name)
a(' :align: center')
else: # code
title = 'File **' + full_name + '**'
a('\n' + title)
a('~' * len(title))
if ext != last_lang and ext != '.txt':
a('\n.. highlight:: ' + ext[1:])
a(' :linenothreshold: 3')
last_lang = ext
# prevent highlight errors with 'none'
elif ext == '.txt':
a('\n.. highlight:: none')
a(' :linenothreshold: 3')
last_lang = ext
a('\n.. include:: ../../../examples/' + full_name)
a(' :code:')
return '\n'.join(output) + '\n'
def write_file(name, s):
''' write the string to the filename '''
with open(name, 'w') as f:
f.write(s)
def make_index(infos):
''' return string of the rst for the gallery's index.rst file. '''
start_string = '''
Gallery of Examples
===================
.. toctree::
:maxdepth: 1
gallery'''
output = [start_string]
for info in infos:
output.append(' gen__{}'.format(info['dunder']))
return '\n'.join(output) + '\n'
def write_all_rst_pages():
''' Do the main task of writing the gallery,
detail, and index rst pages.
'''
infos = get_infos(screenshots_dir)
s = make_gallery_page(infos)
write_file(gallery_filename, s)
for info in infos:
s = make_detail_page(info)
detail_name = slash(generation_dir,
'gen__{}.rst'.format(info['dunder']))
write_file(detail_name, s)
s = make_index(infos)
index_name = slash(generation_dir, 'index.rst')
write_file(index_name, s)
Logger.info('gallery.py: Created gallery rst documentation pages.')
if __name__ == '__main__':
write_all_rst_pages()
"""
file_stream tests.
"""
from __future__ import print_function
from dataclasses import dataclass
import json
import pytest
import os
from wandb.sdk.internal.file_stream import CRDedupeFilePolicy
from wandb.sdk.lib import file_stream_utils
from wandb import util
def generate_history():
history = []
history.append(dict(step=0, data=dict(v1=1, v2=2, v3="dog", mystep=1)))
history.append(dict(step=1, data=dict(v1=3, v2=8, v3="cat", mystep=2)))
return history
def convert_history(history_data):
history = []
for h in history_data:
step = h["step"]
data = h["data"]
data["_step"] = step
history.append(data)
return history
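# For reference, convert_history() flattens each entry; e.g. the first entry
# of generate_history():
#   {"step": 0, "data": {"v1": 1, ...}} -> {"v1": 1, ..., "_step": 0}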
def assert_history(publish_util, dropped=None):
history = generate_history()
ctx_util = publish_util(history=history)
converted_history = convert_history(history)
assert ctx_util.history == converted_history
if dropped is not None:
assert ctx_util.dropped_chunks == dropped
def test_fstream_resp_limits_none(publish_util, mock_server, inject_requests):
resp_normal = json.dumps({"exitcode": None})
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, response=resp_normal)
assert_history(publish_util)
def test_fstream_resp_limits_valid(publish_util, mock_server, inject_requests):
dynamic_settings = {"heartbeat_seconds": 10}
resp_limits = json.dumps({"exitcode": None, "limits": dynamic_settings})
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, response=resp_limits)
assert_history(publish_util)
# note: we are not testing that the limits changed, only that they were accepted
def test_fstream_resp_limits_malformed(publish_util, mock_server, inject_requests):
dynamic_settings = {"heartbeat_seconds": 10}
resp_limits = json.dumps({"exitcode": None, "limits": "junk"})
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, response=resp_limits)
assert_history(publish_util)
def test_fstream_resp_malformed(publish_util, mock_server, inject_requests):
resp_invalid = "invalid json {junk broken]"
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, response=resp_invalid)
assert_history(publish_util)
def test_fstream_status_500(publish_util, mock_server, inject_requests):
match = inject_requests.Match(path_suffix="/file_stream", count=2)
inject_requests.add(match=match, http_status=500)
assert_history(publish_util)
def test_fstream_status_429(publish_util, mock_server, inject_requests):
"""Rate limiting test."""
match = inject_requests.Match(path_suffix="/file_stream", count=2)
inject_requests.add(match=match, http_status=429)
assert_history(publish_util)
def test_fstream_status_404(publish_util, mock_server, inject_requests, capsys):
match = inject_requests.Match(path_suffix="/file_stream", count=2)
inject_requests.add(match=match, http_status=404)
assert_history(publish_util, dropped=1)
stdout, stderr = capsys.readouterr()
assert "Dropped streaming file chunk" in stderr
def test_fstream_status_max_retries(
publish_util, mock_server, inject_requests, mocker, capsys
):
# set short max sleep so we can exhaust retries
mocker.patch("wandb.wandb_sdk.internal.file_stream.MAX_SLEEP_SECONDS", 0.1)
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, http_status=500)
assert_history(publish_util, dropped=1)
stdout, stderr = capsys.readouterr()
assert "Dropped streaming file chunk" in stderr
def test_fstream_requests_error(
publish_util, mock_server, inject_requests, mocker, capsys
):
# inject a requests error, not a http error
match = inject_requests.Match(path_suffix="/file_stream")
inject_requests.add(match=match, requests_error=True)
history = generate_history()
publish_util(history=history)
stdout, stderr = capsys.readouterr()
assert "Dropped streaming file chunk" in stderr
def test_crdedupe_consecutive_offsets():
fp = CRDedupeFilePolicy()
console = {1: "a", 2: "a", 3: "a", 8: "a", 12: "a", 13: "a", 30: "a"}
intervals = fp.get_consecutive_offsets(console)
print(intervals)
assert intervals == [[1, 3], [8, 8], [12, 13], [30, 30]]
@dataclass
class Chunk:
data: str = None
def test_crdedupe_split_chunk():
fp = CRDedupeFilePolicy()
answer = [
("2020-08-25T20:38:36.895321 ", "this is my line of text\nsecond line\n"),
("ERROR 2020-08-25T20:38:36.895321 ", "this is my line of text\nsecond line\n"),
]
test_data = [
"2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n",
"ERROR 2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n",
]
for i, data in enumerate(test_data):
c = Chunk(data=data)
prefix, rest = fp.split_chunk(c)
assert prefix == answer[i][0]
assert rest == answer[i][1]
def test_crdedupe_process_chunks():
fp = CRDedupeFilePolicy()
sep = os.linesep
files = {"output.log": None}
# Test STDERR progress bar updates (\r lines) overwrite the correct offset.
# Test STDOUT and STDERR normal messages get appended correctly.
chunks = [
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
Chunk(data=f"ERROR timestamp progress bar{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 1{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 2{sep}"),
Chunk(data=f"timestamp text{sep}text{sep}text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{
"offset": 0,
"content": [
"timestamp text\n",
"ERROR timestamp error message\n",
"ERROR timestamp progress bar update 2\n",
"timestamp text\n",
"timestamp text\n",
"timestamp text\n",
"ERROR timestamp error message\n",
],
}
]
print(f"\n{ret}")
print(want)
assert ret == want
files["output.log"] = ret
file_requests = list(
file_stream_utils.split_files(files, max_bytes=util.MAX_LINE_BYTES)
)
assert 1 == len(file_requests)
# Test that STDERR progress bar updates in next list of chunks still
# maps to the correct offset.
# Test that we can handle STDOUT progress bars (\r lines) as well.
chunks = [
Chunk(data=f"ERROR timestamp \rprogress bar update 3{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 4{sep}"),
Chunk(data=f"timestamp \rstdout progress bar{sep}"),
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"timestamp \rstdout progress bar update{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{"offset": 2, "content": ["ERROR timestamp progress bar update 4\n"]},
{"offset": 5, "content": ["timestamp stdout progress bar update\n"]},
{"offset": 7, "content": ["timestamp text\n"]},
]
print(f"\n{ret}")
print(want)
assert ret == want
files["output.log"] = ret
file_requests = list(
file_stream_utils.split_files(files, max_bytes=util.MAX_LINE_BYTES)
)
assert 3 == len(file_requests)
# Test that code handles final progress bar output and correctly
# offsets any new progress bars.
chunks = [
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar final{sep}text{sep}text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
Chunk(data=f"ERROR timestamp new progress bar{sep}"),
Chunk(data=f"ERROR timestamp \rnew progress bar update 1{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{"offset": 2, "content": ["ERROR timestamp progress bar final\n"]},
{
"offset": 8,
"content": [
"timestamp text\n",
"ERROR timestamp text\n",
"ERROR timestamp text\n",
"ERROR timestamp error message\n",
"ERROR timestamp new progress bar update 1\n",
],
},
]
print(f"\n{ret}")
print(want)
assert ret == want
files["output.log"] = ret
file_requests = list(
file_stream_utils.split_files(files, max_bytes=util.MAX_LINE_BYTES)
)
assert 2 == len(file_requests)
|
|
from math import sin, cos, pi
from core import SimpleSection, ComplexSection, Dimensions, cached_property
# ==============================================================================
# S I M P L E S E C T I O N S
# ==============================================================================
class Rectangle(SimpleSection):
dimensions = Dimensions(a=None, b=None)
def check_dimensions(self, dims):
if dims.a <= 0:
raise ValueError("Invalid dimensions: a <= 0")
if dims.b <= 0:
raise ValueError("Invalid dimensions: b <= 0")
@cached_property
def A(self):
return self.density * self.a * self.b
@cached_property
def _cog(self):
return 0.0, 0.0
@cached_property
def _I0(self):
_I11 = self.a * self.b**3 / 12.
_I22 = self.b * self.a**3 / 12.
_I12 = 0.0
return tuple(self.density * i for i in (_I11, _I22, _I12))
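# A minimal usage sketch (assumes set_dimensions() exists on SimpleSection and
# density defaults to 1.0 -- both inferred from the ComplexSection subclasses
# below, not verified against core):
#
#     rect = Rectangle()
#     rect.set_dimensions(a=2.0, b=3.0)
#     rect.A    # -> 6.0 (density * a * b)
#     rect._I0  # -> (a*b**3/12, b*a**3/12, 0.0), scaled by density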
class CircularSector(SimpleSection):
dimensions = Dimensions(ro=None, ri=None, phi=None)
def check_dimensions(self, dims):
if dims.ri < 0:
raise ValueError("Invalid dimensions: ri < 0")
if dims.ro <= dims.ri:
raise ValueError("Invalid dimensions: ro < ri")
if dims.phi <= 0:
raise ValueError("Invalid dimensions: phi <= 0")
if dims.phi > 2*pi:
raise ValueError("Invalid dimensions: phi > 2*pi")
@cached_property
def A(self):
ro = self.ro
ri = self.ri
phi = self.phi
        A = 0.5 * (ro**2 - ri**2) * phi
return self.density * A
@cached_property
def _cog(self):
ro = self.ro
ri = self.ri
phi = self.phi
A = self.A / self.density
S2 = 2./3. * (ro**3 - ri**3) * sin(0.5*phi)
_e1 = S2 / A
_e2 = 0.0
return _e1, _e2
@cached_property
def _I0(self):
ro = self.ro
ri = self.ri
phi = self.phi
_I11 = self.density * 0.125 * (ro**4 - ri**4) * (phi - sin(phi))
_I22 = self.density * 0.125 * (ro**4 - ri**4) * (phi + sin(phi))
_I12 = self.density * 0.0
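        # _I11/_I22 above are moments about the circle centre (the sector
        # apex); parallel_axis(..., reverse=True) presumably transfers them to
        # the centroid from _cog, the same convention Polygon._I0 uses below.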
return self.parallel_axis((_I11, _I22, _I12), self._cog, reverse=True)
class CircularSegment(SimpleSection):
dimensions = Dimensions(r=None, phi=None)
def check_dimensions(self, dims):
if dims.r <= 0:
raise ValueError("Invalid dimensions: r <= 0")
if dims.phi <= 0:
raise ValueError("Invalid dimensions: phi <= 0")
if dims.phi > 2*pi:
raise ValueError("Invalid dimensions: phi > 0")
@cached_property
def _cog(self):
r, phi = self.r, self.phi
_e1 = 4 * sin(0.5*phi)**3 * r / (3 * (phi - sin(phi)) )
_e2 = 0.0
return _e1, _e2
@cached_property
def A(self):
r, phi = self.r, self.phi
return self.density * 0.5 * r**2 * (phi - sin(phi))
@cached_property
def _I0(self):
r = self.r
phi = self.phi
A = self.A / self.density
e1, e2 = self._cog
I11 = 1. / 48. * r**4 * (6*phi - 8*sin(phi) + sin(2*phi))
I22 = 0.125 * r**4 * (phi - sin(phi)*cos(phi)) - A * e1**2
I12 = 0.0
return tuple(self.density * i for i in (I11, I22, I12))
class Polygon(SimpleSection, list):
@cached_property
def A(self):
if len(self) < 3:
raise ValueError("Cannot calculate A: Polygon must have at least three vertices")
        # Area will be positive if vertices are ordered counter-clockwise.
x1, x2 = self.__looped_vertices()
n = len(self)
A = 0.5 * sum(x1[i]*x2[i+1] - x1[i+1]*x2[i] for i in range(n))
return self.density * A
@cached_property
def _cog(self):
if len(self) < 3:
raise ValueError("Cannot calculate _cog: Polygon must have at least three vertices")
x1, x2 = self.__looped_vertices()
n = len(self)
A = self.A / self.density
_e1 = 1. / (6 * A) * sum( (x1[i] + x1[i+1]) * (x1[i]*x2[i+1] - x1[i+1]*x2[i]) for i in range(n) )
_e2 = 1. / (6 * A) * sum( (x2[i] + x2[i+1]) * (x1[i]*x2[i+1] - x1[i+1]*x2[i]) for i in range(n) )
return _e1, _e2
@cached_property
def _I0(self):
if len(self) < 3:
raise ValueError("Cannot calculate _I0: Polygon must have at least three vertices")
x1, x2 = self.__looped_vertices()
n = len(self)
_I11 = 1./12. * sum( (x2[i]**2 + x2[i]*x2[i+1] + x2[i+1]**2) * (x1[i]*x2[i+1] - x1[i+1]*x2[i]) for i in range(n))
_I22 = 1./12. * sum( (x1[i]**2 + x1[i]*x1[i+1] + x1[i+1]**2) * (x1[i]*x2[i+1] - x1[i+1]*x2[i]) for i in range(n))
_I12 = 1./24. * sum( (x1[i]*x2[i+1] + 2*x1[i]*x2[i] + 2*x1[i+1]*x2[i+1] + x1[i+1]*x2[i] )*(x1[i]*x2[i+1] - x1[i+1]*x2[i]) for i in range(n))
_I = tuple(self.density * i for i in (_I11, _I22, _I12))
return self.parallel_axis(_I, self._cog, reverse=True)
def __looped_vertices(self):
n = len(self)
x1 = [self[i%n][0] for i in range(n + 1)]
x2 = [self[i%n][1] for i in range(n + 1)]
return x1, x2
    # Override list methods which add new items to the list.
    # Only allow adding items consisting of two values which can be
    # converted to float.
    # Any change of vertices must call self.reset_cached_properties.
# =============================================================
def append(self, vertex):
vertex = self.convert_to_vertices(vertex)[0]
list.append(self, vertex)
self.reset_cached_properties()
def extend(self, vertices):
vertices = self.convert_to_vertices(*vertices)
list.extend(self, vertices)
self.reset_cached_properties()
def insert(self, i, vertex):
vertex = self.convert_to_vertices(vertex)[0]
list.insert(self, i, vertex)
self.reset_cached_properties()
def __setitem__(self, i, vertex):
vertex = self.convert_to_vertices(vertex)[0]
list.__setitem__(self, i, vertex)
self.reset_cached_properties()
def __setslice__(self, i, j, vertices):
vertices = self.convert_to_vertices(*vertices)
list.__setslice__(self, i, j, vertices)
self.reset_cached_properties()
@staticmethod
def convert_to_vertices(*items):
return [(float(x), float(y)) for x, y in items]
# =============================================================
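# A minimal sketch of building a polygon (assumes a bare Polygon() is valid,
# as the `sections = [Polygon]` usage in Box below suggests; vertices are
# (x, y) pairs, ordered counter-clockwise so the signed area is positive):
#
#     tri = Polygon()
#     tri.extend([(0, 0), (1, 0), (0, 1)])
#     tri.A  # -> 0.5 * density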
class Triangle(Polygon):
    # Override list methods which add new items to the list.
    # Only allow adding items consisting of two values which can be
    # converted to float.
    # Any change of vertices must call self.reset_cached_properties.
# =============================================================
def append(self, vertex):
if len(self) == 3:
raise IndexError("Triangle cannot have more than 3 vertices")
super(Triangle, self).append(vertex)
def extend(self, vertices):
vertices = self.convert_to_vertices(*vertices)
if len(self) + len(vertices) > 3:
raise IndexError("Triangle cannot have more than 3 vertices")
super(Triangle, self).extend(vertices)
def insert(self, i, vertex):
if len(self) == 3:
raise IndexError("Triangle cannot have more than 3 vertices")
super(Triangle, self).insert(i, vertex)
def __setslice__(self, i, j, vertices):
vertices = self.convert_to_vertices(*vertices)
resulting = self[:]
resulting.__setslice__(i, j, vertices)
if len(resulting) > 3:
raise IndexError("Triangle cannot have more than 3 vertices")
super(Triangle, self).__setslice__(i, j, vertices)
# =============================================================
def reset_cached_properties(self):
if len(self) == 3:
x, y = zip(*self)
v1 = x[1] - x[0], y[1] - y[0]
v2 = x[2] - x[1], y[2] - y[1]
sin = v1[0] * v2[1] - v1[1] * v2[0]
if sin < 0:
self[:] = self[0], self[2], self[1]
super(Triangle, self).reset_cached_properties()
# ==============================================================================
# C O M P L E X S E C T I O N S
# ==============================================================================
class Circle(ComplexSection):
dimensions = Dimensions(r=None)
sections = [CircularSector]
def update_sections(self):
self.sections[0].set_dimensions(ri=0, ro=self.r, phi=2*pi)
class Box(ComplexSection):
dimensions = Dimensions(a=None, b=None, ta=None, tb=None)
sections = [Polygon]
def check_dimensions(self, dims):
if dims.a <= 0:
raise ValueError("Invalid dimensions: a <= 0")
if dims.b <= 0:
raise ValueError("Invalid dimensions: b <= 0")
if dims.ta <= 0:
raise ValueError("Invalid dimensions: ta <= 0")
if dims.tb <= 0:
raise ValueError("Invalid dimensions: tb <= 0")
if dims.a <= 2*dims.tb:
raise ValueError("Invalid dimensions: a <= 2*tb")
if dims.b <= 2*dims.ta:
raise ValueError("Invalid dimensions: b <= 2*ta")
def update_sections(self):
ao = 0.5 * self.a
ai = 0.5 * (self.a - 2*self.tb)
bo = 0.5 * self.b
bi = 0.5 * (self.b - 2*self.ta)
polygon = self.sections[0]
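        # The ten vertices below trace the outer rectangle counter-clockwise,
        # then cut in along a slit at x = -ao and walk the inner rectangle
        # clockwise, so the hole's signed area is negative and is subtracted
        # from the outline within a single polygon.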
polygon[:] = [
(-ao, -bo),
( ao, -bo),
( ao, bo),
(-ao, bo),
(-ao, -bi),
(-ai, -bi),
(-ai, bi),
( ai, bi),
( ai, -bi),
(-ao, -bi)]
class Ring(ComplexSection):
sections = [CircularSector]
dimensions = Dimensions(ro=None, ri=None)
def update_sections(self):
self.sections[0].set_dimensions(ro=self.ro, ri=self.ri, phi=2*pi)
def check_dimensions(self, dims):
super(Ring, self).check_dimensions(dims)
class Wedge(ComplexSection):
sections = [CircularSector]
dimensions = Dimensions(r=None, phi=None)
def update_sections(self):
self.sections[0].set_dimensions(ro=self.r, ri=0, phi=self.phi)
class WedgeRing(CircularSector):
pass
class BaseFillet(ComplexSection):
sections = [Triangle, CircularSegment]
dimensions = Dimensions(r=None, phi=None)
densities = [1.0, -1.0]
def check_dimensions(self, dims):
if dims.r <= 0:
raise ValueError("Invalid dimensions: r <= 0")
if dims.phi <= 0:
raise ValueError("Invalid dimensions: phi <= 0")
if dims.phi == pi:
raise ValueError("Invalid dimensions: phi = pi")
if dims.phi >= 2*pi:
raise ValueError("Invalid dimensions: phi >= 2*pi")
def update_sections(self):
def sign(x):
return x / abs(x)
alpha = self.phi/2
beta = abs(pi - self.phi)
theta = pi * (self.phi < pi)
a = self.r * cos(alpha) / sin(alpha)
b = self.r * cos(alpha)**2 / sin(alpha) * sign(a)
c = self.r * cos(alpha)
d = self.r / sin(alpha) * sign(a)
triangle = self.sections[0]
triangle[:] = [
(0, 0),
(b, c),
(b, -c)]
segment = self.sections[1]
segment.set_dimensions(r=self.r, phi=beta)
segment.set_position(d1=d, d2=0, theta=theta)
if self.phi > pi:
self.densities = [-d for d in self.__class__.densities]
else:
self.densities = self.__class__.densities[:]
self.set_density(self.density)
class Fillet(ComplexSection):
sections = [BaseFillet]
dimensions = Dimensions(r=None, phi0=None, phi1=None)
def check_dimensions(self, dims):
if dims.r <= 0:
raise ValueError("Invalid dimensions: r <= 0")
if dims.phi1 <= dims.phi0:
raise ValueError("Invalid dimensions: phi1 <= phi0")
if dims.phi1 - dims.phi0 >= 2*pi:
raise ValueError("Invalid dimensions: phi1 - phi0 >= 2*pi")
def update_sections(self):
phi = self.phi1 - self.phi0
theta = 0.5 * (self.phi0 + self.phi1)
self.sections[0].set_dimensions(r=self.r, phi=phi)
self.sections[0].set_position(d1=0, d2=0, theta=theta)
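# A hedged end-to-end sketch (construction and set_dimensions() semantics live
# in core and are assumed here):
#
#     ring = Ring()
#     ring.set_dimensions(ro=2.0, ri=1.0)
#     ring.A  # -> density * pi * (ro**2 - ri**2), via the full CircularSector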
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceTapConfigurationsOperations:
"""NetworkInterfaceTapConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
tap_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
tap_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified tap configuration from the NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
tap_configuration_name: str,
**kwargs: Any
) -> "_models.NetworkInterfaceTapConfiguration":
"""Get the specified tap configuration on a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceTapConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceTapConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
tap_configuration_name: str,
tap_configuration_parameters: "_models.NetworkInterfaceTapConfiguration",
**kwargs: Any
) -> "_models.NetworkInterfaceTapConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_configuration_parameters, 'NetworkInterfaceTapConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
tap_configuration_name: str,
tap_configuration_parameters: "_models.NetworkInterfaceTapConfiguration",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkInterfaceTapConfiguration"]:
"""Creates or updates a Tap configuration in the specified NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:param tap_configuration_parameters: Parameters supplied to the create or update tap
configuration operation.
:type tap_configuration_parameters: ~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceTapConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterfaceTapConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceTapConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
tap_configuration_parameters=tap_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceTapConfigurationListResult"]:
"""Get all Tap configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceTapConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceTapConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations'} # type: ignore
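# A hedged usage sketch (client construction elided; the `client` variable and
# its attribute path are assumptions, not defined in this generated module):
#
#     poller = await client.network_interface_tap_configurations.begin_delete(
#         resource_group_name="rg", network_interface_name="nic",
#         tap_configuration_name="tap")
#     await poller.result()  # resolves to None once the LRO completes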
|
|
#!/usr/bin/env python
"""
CmpRuns - A simple tool for comparing two static analyzer runs to determine
which reports have been added, removed, or changed.
This is designed to support automated testing using the static analyzer, from
two perspectives:
1. To monitor changes in the static analyzer's reports on real code bases, for
regression testing.
2. For use by end users who want to integrate regular static analyzer testing
   into a buildbot-like environment.
"""
import os
import plistlib
#
class multidict:
def __init__(self, elts=()):
self.data = {}
for key,value in elts:
self[key] = value
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
if key in self.data:
self.data[key].append(value)
else:
self.data[key] = [value]
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def keys(self):
return self.data.keys()
def __len__(self):
return len(self.data)
def get(self, key, default=None):
return self.data.get(key, default)
#
class CmpOptions:
def __init__(self, verboseLog=None, root=""):
self.root = root
self.verboseLog = verboseLog
class AnalysisReport:
def __init__(self, run, files):
self.run = run
self.files = files
class AnalysisDiagnostic:
def __init__(self, data, report, htmlReport):
self.data = data
self.report = report
self.htmlReport = htmlReport
def getReadableName(self):
loc = self.data['location']
filename = self.report.run.getSourceName(self.report.files[loc['file']])
line = loc['line']
column = loc['col']
category = self.data['category']
description = self.data['description']
# FIXME: Get a report number based on this key, to 'distinguish'
# reports, or something.
return '%s:%d:%d, %s: %s' % (filename, line, column, category,
description)
def getReportData(self):
if self.htmlReport is None:
return " "
return os.path.join(self.report.run.path, self.htmlReport)
# We could also dump the report with:
# return open(os.path.join(self.report.run.path,
# self.htmlReport), "rb").read()
class AnalysisRun:
def __init__(self, path, opts):
self.path = path
self.reports = []
self.diagnostics = []
self.opts = opts
def getSourceName(self, path):
if path.startswith(self.opts.root):
return path[len(self.opts.root):]
return path
def loadResults(path, opts, deleteEmpty=True):
run = AnalysisRun(path, opts)
for f in os.listdir(path):
if (not f.startswith('report') or
not f.endswith('plist')):
continue
p = os.path.join(path, f)
data = plistlib.readPlist(p)
# Ignore/delete empty reports.
if not data['files']:
            if deleteEmpty:
os.remove(p)
continue
        # Extract the HTML reports, if they exist.
if 'HTMLDiagnostics_files' in data['diagnostics'][0]:
htmlFiles = []
for d in data['diagnostics']:
# FIXME: Why is this named files, when does it have multiple
# files?
assert len(d['HTMLDiagnostics_files']) == 1
htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
else:
htmlFiles = [None] * len(data['diagnostics'])
report = AnalysisReport(run, data.pop('files'))
diagnostics = [AnalysisDiagnostic(d, report, h)
for d,h in zip(data.pop('diagnostics'),
htmlFiles)]
assert not data
run.reports.append(report)
run.diagnostics.extend(diagnostics)
return run
def compareResults(A, B):
"""
compareResults - Generate a relation from diagnostics in run A to
diagnostics in run B.
The result is the relation as a list of triples (a, b, confidence) where
each element {a,b} is None or an element from the respective run, and
confidence is a measure of the match quality (where 0 indicates equality,
and None is used if either element is None).
"""
res = []
# Quickly eliminate equal elements.
neqA = []
neqB = []
eltsA = list(A.diagnostics)
eltsB = list(B.diagnostics)
eltsA.sort(key = lambda d: d.data)
eltsB.sort(key = lambda d: d.data)
while eltsA and eltsB:
a = eltsA.pop()
b = eltsB.pop()
if a.data['location'] == b.data['location']:
res.append((a, b, 0))
elif a.data > b.data:
neqA.append(a)
eltsB.append(b)
else:
neqB.append(b)
eltsA.append(a)
neqA.extend(eltsA)
neqB.extend(eltsB)
    # FIXME: Add fuzzy matching. One simple and possibly effective idea would be
# to bin the diagnostics, print them in a normalized form (based solely on
# the structure of the diagnostic), compute the diff, then use that as the
# basis for matching. This has the nice property that we don't depend in any
# way on the diagnostic format.
for a in neqA:
res.append((a, None, None))
for b in neqB:
res.append((None, b, None))
return res
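# Shape of the relation for two hypothetical runs (dA*/dB* are diagnostics):
#   [(dA1, dB1, 0),       # same location in both runs
#    (dA2, None, None),   # only in run A (removed in B)
#    (None, dB3, None)]   # only in run B (added)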
def cmpScanBuildResults(dirA, dirB, opts, deleteEmpty=True):
# Load the run results.
resultsA = loadResults(dirA, opts, deleteEmpty)
resultsB = loadResults(dirB, opts, deleteEmpty)
# Open the verbose log, if given.
if opts.verboseLog:
auxLog = open(opts.verboseLog, "wb")
else:
auxLog = None
diff = compareResults(resultsA, resultsB)
foundDiffs = 0
for res in diff:
a,b,confidence = res
if a is None:
print "ADDED: %r" % b.getReadableName()
foundDiffs += 1
if auxLog:
print >>auxLog, ("('ADDED', %r, %r)" % (b.getReadableName(),
b.getReportData()))
elif b is None:
print "REMOVED: %r" % a.getReadableName()
foundDiffs += 1
if auxLog:
print >>auxLog, ("('REMOVED', %r, %r)" % (a.getReadableName(),
a.getReportData()))
elif confidence:
print "CHANGED: %r to %r" % (a.getReadableName(),
b.getReadableName())
foundDiffs += 1
if auxLog:
print >>auxLog, ("('CHANGED', %r, %r, %r, %r)"
% (a.getReadableName(),
b.getReadableName(),
a.getReportData(),
b.getReportData()))
else:
pass
TotalReports = len(resultsB.diagnostics)
print "TOTAL REPORTS: %r" % TotalReports
print "TOTAL DIFFERENCES: %r" % foundDiffs
if auxLog:
print >>auxLog, "('TOTAL NEW REPORTS', %r)" % TotalReports
print >>auxLog, "('TOTAL DIFFERENCES', %r)" % foundDiffs
return foundDiffs
def main():
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
parser.add_option("", "--root", dest="root",
help="Prefix to ignore on source files",
action="store", type=str, default="")
parser.add_option("", "--verbose-log", dest="verboseLog",
help="Write additional information to LOG [default=None]",
action="store", type=str, default=None,
metavar="LOG")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.error("invalid number of arguments")
dirA,dirB = args
cmpScanBuildResults(dirA, dirB, opts)
if __name__ == '__main__':
main()
|
|
"""Utilities for assessing and repairing CouchDB corruption"""
import logging
from collections import defaultdict
from itertools import islice
from json.decoder import JSONDecodeError
from urllib.parse import urljoin, urlparse, urlunparse
import attr
from couchdbkit import Database
from couchdbkit.exceptions import ResourceNotFound
from dateutil.parser import parse as parse_date
from django.conf import settings
from memoized import memoized
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.bulk import BulkFetchException
from dimagi.utils.couch.database import retry_on_couch_error
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.models import Application
from corehq.apps.auditcare.models import AuditEvent
from corehq.apps.fixtures.models import FixtureDataType
from corehq.apps.userreports.models import ReportConfiguration
from corehq.apps.users.models import CommCareUser
from corehq.apps.domain.models import Domain
from corehq.motech.repeaters.models import Repeater
from corehq.toggles.models import Toggle
from corehq.util.couch_helpers import NoSkipArgsProvider
from corehq.util.pagination import ResumableFunctionIterator
log = logging.getLogger(__name__)
COUCH_NODE_PORT = 15984
DOC_TYPES_BY_NAME = {
"main": {
"type": Toggle,
"exclude_types": {
'XFormInstance',
'XFormArchived',
'XFormDeprecated',
'XFormDuplicate',
'XFormError',
'SubmissionErrorLog',
'XFormInstance-Deleted',
'HQSubmission',
"CommCareCase",
"CommCareCase-Deleted",
},
},
"users": {
"type": CommCareUser,
"use_domain": True,
},
"domains": {"type": Domain},
"apps": {
"type": Application,
"use_domain": True,
},
"auditcare": {
"type": AuditEvent,
"use_domain": True,
"view": "auditcare/all_events",
},
"fixtures": {
"type": FixtureDataType,
"use_domain": True
},
"receiver_wrapper_repeaters": {
"type": Repeater,
"use_domain": True,
"view": "repeaters/repeaters",
},
"receiver_wrapper_repeat_records": {
"type": Repeater,
"use_domain": True,
"view": "repeaters/repeat_records",
},
"meta": {
"type": ReportConfiguration,
"use_domain": True
},
}
def count_missing_ids(*args, repair=False):
def log_result(rec):
repaired = f" repaired={rec.repaired}" if repair else ""
log.info(
f" {rec.doc_type}:{repaired} missing={len(rec.missing)} "
f"tries=(avg: {rec.avg_tries}, max: {rec.max_tries})"
)
doc_type = None
rec = None
results = defaultdict(Result)
for doc_type, (missing, tries, repaired) in iter_missing_ids(*args, repair):
if rec and doc_type != rec.doc_type:
log_result(rec)
results.pop(doc_type, None)
rec = results[doc_type]
rec.doc_type = doc_type
rec.missing.update(missing)
rec.tries.append(tries)
rec.repaired += repaired
if rec:
log_result(rec)
else:
log.info("no documents found")
def repair_missing_ids(doc_name, missing_ids_file, line_range, min_tries):
def get_missing(doc_ids):
@retry_on_couch_error
def get_doc_ids():
try:
results = list(db.view("_all_docs", **view_kwargs))
except JSONDecodeError as err:
raise BulkFetchException(f"{type(err).__name__}: {err}") # retry
try:
return {r["id"] for r in results}
except KeyError as err:
raise BulkFetchException(f"{type(err).__name__}: {err}") # retry
view_kwargs = {
"keys": list(doc_ids),
"include_docs": False,
"reduce": False,
}
return find_missing_ids(get_doc_ids, min_tries)[0]
db = CouchCluster(DOC_TYPES_BY_NAME[doc_name]["type"].get_db())
with open(missing_ids_file, encoding="utf-8") as missing_ids:
total = sum(1 for id in missing_ids if id.strip())
missing_ids.seek(0)
missing_ids = (id.strip() for id in missing_ids if id.strip())
if any(line_range):
start, stop = line_range
total = (stop or total) - start
log.info("scanning %s ids on lines %s..%s", total, start, stop or "")
missing_ids = islice(missing_ids, start, stop)
repaired = 0
for doc_ids in chunked(missing_ids, 100, list):
missing = None
for x in range(min_tries):
for doc_id in doc_ids:
log.debug("repairing %s", doc_id)
db.repair(doc_id)
missing = get_missing(doc_ids)
repaired += len(doc_ids) - len(missing)
log.info("repaired %s of %s missing docs", repaired, total)
if not missing:
break
doc_ids = missing
if missing:
log.warning("could not repair %s missing docs", len(missing))
print("\n".join(sorted(missing)))
log.info("repaired %s of %s missing docs", repaired, total)
@attr.s
class Result:
doc_type = attr.ib(default=None)
missing = attr.ib(factory=set)
tries = attr.ib(factory=list)
repaired = attr.ib(default=0)
@property
def max_tries(self):
return max(self.tries) if self.tries else 0
@property
def avg_tries(self):
return round(sum(self.tries) / len(self.tries), 2) if self.tries else 0
def iter_missing_ids(min_tries, params, repair=False):
if params.doc_name == "ALL":
assert not params.doc_type, params
groups = dict(DOC_TYPES_BY_NAME)
if params.domain is not None:
groups = {k: g for k, g in groups.items() if g.get("use_domain")}
else:
groups = {k: g for k, g in groups.items() if not g.get("use_domain")}
else:
groups = {params.doc_name: DOC_TYPES_BY_NAME[params.doc_name]}
for name, group in groups.items():
if params.doc_type:
group = dict(group, doc_types=[params.doc_type])
log.info("processing %s", name)
db = CouchCluster(group["type"].get_db())
domain_name = params.domain if group.get("use_domain") else None
for doc_type in get_doc_types(group):
iter_params = iteration_parameters(
db, doc_type, domain_name, params.view_range, group)
missing_results = _iter_missing_ids(db, min_tries, *iter_params, repair)
try:
for rec in missing_results:
yield doc_type, rec["missing_info"]
finally:
missing_results.discard_state()
def get_doc_types(group):
if "exclude_types" in group:
assert "doc_types" not in group, group
excludes = group["exclude_types"]
db = group["type"].get_db()
results = db.view("all_docs/by_doc_type", group_level=1)
return [r["key"][0] for r in results if r["key"][0] not in excludes]
return group.get("doc_types", [None])
def _iter_missing_ids(db, min_tries, resume_key, view_name, view_params, repair):
def data_function(**view_kwargs):
@retry_on_couch_error
def get_doc_ids():
results = list(db.view(view_name, **view_kwargs))
if "limit" in view_kwargs and results:
nonlocal last_result
last_result = results[-1]
replace_limit_with_endkey(view_kwargs, last_result)
return {r["id"] for r in results}
def replace_limit_with_endkey(view_kwargs, last_result):
assert "endkey_docid" not in view_kwargs, view_kwargs
view_kwargs.pop("limit")
view_kwargs["endkey"] = last_result["key"]
view_kwargs["endkey_docid"] = last_result["id"]
last_result = None
missing, tries = find_missing_ids(get_doc_ids, min_tries=min_tries)
if last_result is None:
log.debug("no results %s - %s", view_kwargs['startkey'], view_kwargs['endkey'])
assert not missing
return []
if missing and repair:
missing, tries2, repaired = repair_couch_docs(db, missing, get_doc_ids, min_tries)
tries += tries2
else:
repaired = 0
log.debug(f"{len(missing)}/{tries} start={view_kwargs['startkey']} {missing or ''}")
last_result["missing_info"] = missing, tries, repaired
return [last_result]
args_provider = NoSkipArgsProvider(view_params)
return ResumableFunctionIterator(resume_key, data_function, args_provider)
def repair_couch_docs(db, missing, get_doc_ids, min_tries):
total_tries = 0
to_repair = len(missing)
max_repairs = min_tries
for n in range(max_repairs):
for doc_id in missing:
db.repair(doc_id)
repaired = missing
missing, tries = find_missing_ids(get_doc_ids, min_tries=min_tries)
total_tries += tries
if log.isEnabledFor(logging.DEBUG):
repaired -= missing
log.debug(f"repaired {to_repair - len(missing)} of {to_repair}: {repaired or ''}")
if not missing:
break
return missing, total_tries, to_repair - len(missing)
def iteration_parameters(db, doc_type, domain, view_range, group, chunk_size=1000):
if "view" in group:
view_name = group["view"]
start = end = "-"
assert doc_type is None, doc_type
if domain is not None:
startkey = [domain]
endkey = [domain]
else:
startkey = []
endkey = []
elif domain is not None:
view_name = 'by_domain_doc_type_date/view'
if doc_type is not None:
startkey = [domain, doc_type]
endkey = [domain, doc_type]
else:
startkey = [domain]
endkey = [domain]
elif doc_type is not None:
view_name = 'all_docs/by_doc_type'
startkey = [doc_type]
endkey = [doc_type]
else:
view_name = 'all_docs/by_doc_type'
startkey = []
endkey = []
if view_range is not None:
assert domain or doc_type, (domain, doc_type)
if group.get("date_range"):
assert domain and doc_type and view_name == 'by_domain_doc_type_date/view', \
(domain, doc_type, view_name, view_range)
view_range = [json_format_datetime(parse_date(x)) for x in view_range]
start, end = view_range
startkey.append(start)
endkey.append(end)
else:
start = end = "-"
if startkey == endkey:
endkey.append({})
view_params = {
'startkey': startkey,
'endkey': endkey,
'limit': chunk_size,
'include_docs': False,
'reduce': False,
}
resume_key = f"{db.dbname}.{domain}.{doc_type}.{start}-{end}"
return resume_key, view_name, view_params
def find_missing_ids(get_doc_ids, min_tries, limit=None):
"""Find missing ids
Given a function that is expected to always return the same set of
unique ids, find all ids that are missing from some result sets.
Returns a tuple `(missing_ids, tries)`
"""
if min_tries < 2:
raise ValueError("min_tries must be greater than 1")
limit = limit or min_tries * 20
min_tries -= 1
missing = set()
all_ids = set()
no_news = 1
for tries in range(limit):
next_ids = get_doc_ids()
if all_ids:
miss = next_ids ^ all_ids
if any(x not in missing for x in miss):
no_news = 1
missing.update(miss)
all_ids.update(miss)
else:
all_ids.update(next_ids)
if no_news > min_tries:
return missing, tries + 1
no_news += 1
log.warning(f"still finding new missing docs after {limit} queries")
return missing, limit
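# Stopping rule sketch: the loop keeps querying until `min_tries` consecutive
# calls surface no id that is new to the accumulated symmetric difference, so
# with min_tries=2 a perfectly stable get_doc_ids returns (set(), 2) after two
# agreeing queries.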
@attr.s
class CouchCluster:
db = attr.ib()
@property
def dbname(self):
return self.db.dbname
@property
@memoized
def _node_dbs(self):
return _get_couch_node_databases(self.db)
@retry_on_couch_error
def repair(self, doc_id):
for node in self._node_dbs:
try:
node.get(doc_id)
except ResourceNotFound:
pass
def view(self, *args, **kw):
return self.db.view(*args, **kw)
def _get_couch_node_databases(db, node_port=COUCH_NODE_PORT):
def node_url(proxy_url, node):
return urlunparse(proxy_url._replace(netloc=f'{auth}@{node}:{node_port}'))
resp = db.server._request_session.get(urljoin(db.server.uri, '/_membership'))
resp.raise_for_status()
membership = resp.json()
nodes = [node.split("@")[1] for node in membership["cluster_nodes"]]
proxy_url = urlparse(settings.COUCH_DATABASE)._replace(path=f"/{db.dbname}")
auth = proxy_url.netloc.split('@')[0]
return [Database(node_url(proxy_url, node)) for node in nodes]
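# Example of the rewrite above (hypothetical values): with
# settings.COUCH_DATABASE = "https://user:pass@couch.example.com/commcarehq"
# and a membership entry "couchdb@10.0.0.1", node_url() yields
# "https://user:pass@10.0.0.1:15984/<dbname>".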
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import ujson
from typing import Any, Mapping, List
from zerver.lib.emoji import emoji_name_to_emoji_code
from zerver.lib.request import JsonableError
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_realm, RealmEmoji, Recipient, UserMessage
class ReactionEmojiTest(ZulipTestCase):
def test_missing_emoji(self):
# type: () -> None
"""
Sending reaction without emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/',
**self.api_auth(sender))
self.assertEqual(result.status_code, 400)
def test_add_invalid_emoji(self):
# type: () -> None
"""
Sending invalid emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_remove_invalid_emoji(self):
# type: () -> None
"""
Removing invalid emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_delete('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_add_deactivated_realm_emoji(self):
# type: () -> None
"""
Sending deactivated realm emoji fails.
"""
emoji = RealmEmoji.objects.get(name="green_tick")
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/green_tick',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'green_tick' does not exist")
def test_valid_emoji(self):
# type: () -> None
"""
Reacting with valid emoji succeeds
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/smile',
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
def test_zulip_emoji(self):
# type: () -> None
"""
Reacting with zulip emoji succeeds
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/zulip',
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
def test_valid_emoji_react_historical(self):
# type: () -> None
"""
Reacting with valid emoji on a historical message succeeds
"""
realm = get_realm("zulip")
stream_name = "Saxony"
self.subscribe_to_stream(self.example_email("cordelia"), stream_name, realm=realm)
message_id = self.send_message(self.example_email("cordelia"), stream_name, Recipient.STREAM)
user_profile = self.example_user('hamlet')
sender = user_profile.email
# Verify that hamlet did not receive the message.
self.assertFalse(UserMessage.objects.filter(user_profile=user_profile,
message_id=message_id).exists())
# Have hamlet react to the message
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (message_id,),
**self.api_auth(sender))
self.assert_json_success(result)
# Fetch the now-created UserMessage object to confirm it exists and is historical
user_message = UserMessage.objects.get(user_profile=user_profile, message_id=message_id)
self.assertTrue(user_message.flags.historical)
self.assertTrue(user_message.flags.read)
self.assertFalse(user_message.flags.starred)
def test_valid_realm_emoji(self):
# type: () -> None
"""
Reacting with valid realm emoji succeeds
"""
sender = self.example_email("hamlet")
emoji_name = 'green_tick'
result = self.client_put('/api/v1/messages/1/emoji_reactions/%s' % (emoji_name,),
**self.api_auth(sender))
self.assert_json_success(result)
def test_emoji_name_to_emoji_code(self):
# type: () -> None
"""
An emoji name is mapped canonically to emoji code.
"""
realm = get_realm('zulip')
# Test active realm emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'green_tick')
self.assertEqual(emoji_code, 'green_tick')
self.assertEqual(reaction_type, 'realm_emoji')
# Test deactivated realm emoji.
emoji = RealmEmoji.objects.get(name="green_tick")
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
with self.assertRaises(JsonableError) as exc:
emoji_name_to_emoji_code(realm, 'green_tick')
self.assertEqual(str(exc.exception), "Emoji 'green_tick' does not exist")
# Test ':zulip:' emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'zulip')
self.assertEqual(emoji_code, 'zulip')
self.assertEqual(reaction_type, 'zulip_extra_emoji')
# Test unicode emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, '1f632')
self.assertEqual(reaction_type, 'unicode_emoji')
# Test override unicode emoji.
overriding_emoji = RealmEmoji.objects.create(
name='astonished', realm=realm, file_name='astonished')
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, 'astonished')
self.assertEqual(reaction_type, 'realm_emoji')
        # Test deactivating the overriding realm emoji.
overriding_emoji.deactivated = True
overriding_emoji.save(update_fields=['deactivated'])
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, '1f632')
self.assertEqual(reaction_type, 'unicode_emoji')
# Test override `:zulip:` emoji.
overriding_emoji = RealmEmoji.objects.create(
name='zulip', realm=realm, file_name='zulip')
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'zulip')
self.assertEqual(emoji_code, 'zulip')
self.assertEqual(reaction_type, 'realm_emoji')
# Test non-existent emoji.
with self.assertRaises(JsonableError) as exc:
emoji_name_to_emoji_code(realm, 'invalid_emoji')
self.assertEqual(str(exc.exception), "Emoji 'invalid_emoji' does not exist")
class ReactionMessageIDTest(ZulipTestCase):
def test_missing_message_id(self):
# type: () -> None
"""
Reacting without a message_id fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages//emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_invalid_message_id(self):
# type: () -> None
"""
Reacting to an invalid message id fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/-1/emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_inaccessible_message_id(self):
# type: () -> None
"""
        Reacting to an inaccessible (for instance, private) message fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = self.example_email("iago")
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(result)
pm_id = result.json()['id']
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(result, "Invalid message(s)")
class ReactionTest(ZulipTestCase):
def test_add_existing_reaction(self):
# type: () -> None
"""
Creating the same reaction twice fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
first = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction already exists")
def test_remove_nonexisting_reaction(self):
# type: () -> None
"""
Removing a reaction twice fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(add)
first = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction does not exist")
class ReactionEventTest(ZulipTestCase):
def test_add_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = self.example_user('hamlet')
pm_recipient = self.example_user('othello')
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient.email},
**self.api_auth(pm_sender.email))
self.assert_json_success(result)
pm_id = result.json()['id']
expected_recipient_ids = set([pm_sender.id, pm_recipient.id])
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender.email)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'add')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
def test_remove_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = self.example_user('hamlet')
pm_recipient = self.example_user('othello')
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient.email},
**self.api_auth(pm_sender.email))
self.assert_json_success(result)
content = result.json()
pm_id = content['id']
expected_recipient_ids = set([pm_sender.id, pm_recipient.id])
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(add)
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender.email)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'remove')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
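# For reference, the REST endpoints exercised by the tests above:
#   POST   /api/v1/messages                                           -> send a message
#   PUT    /api/v1/messages/<message_id>/emoji_reactions/<emoji_name> -> add a reaction
#   DELETE /api/v1/messages/<message_id>/emoji_reactions/<emoji_name> -> remove a reaction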
|
|
import sympy
from sympy import Function, Derivative, Expr, Add, Subs, Symbol
from sympy import Matrix
def _func2der(expr):
'''Does the following:
u,xt(u(x, t), x, t) = Derivative(u(x, t), x, t)
'''
if expr.is_Matrix:
if expr.shape == (1, 1):
expr = expr[0,0]
else:
raise NotImplementedError('Matrices or vectors are not supported!')
def _main(expr):
_new = []
for a in expr.args:
is_V = False
if isinstance(a, V):
is_V = True
a = a.expr
if a.is_Function:
name = a.__class__.__name__
for i in a.args:
if i.is_Function:
func = i
break
if ',' in name:
variables = [eval(i) for i in name.split(',')[1]]
a = Derivative(func, *variables)
if 'V' in name:
#TODO remove this string-based control and use class
a = V(a)
a.function = func
#TODO add more, maybe all that have args
elif a.is_Add or a.is_Mul or a.is_Pow:
a = _main(a)
if is_V:
a = V(a)
a.function = func
_new.append( a )
return expr.func(*tuple(_new))
return _main(expr)
def _der2func(expr):
'''Does the following:
Derivative(u(x, t), x, t) = u,xt(u(x, t), x, t)
'''
if expr.is_Matrix:
if expr.shape == (1, 1):
expr = expr[0,0]
else:
raise NotImplementedError('Matrices or vectors are not supported!')
def _main(expr):
_new = []
for a in expr.args:
is_V = False
if isinstance(a, V):
is_V = True
a = a.expr
if a.is_Derivative:
variables = a.atoms()
func = a.expr
variables.add(func)
name = a.expr.__class__.__name__
if ',' in name:
a = Function('%s' % name +
''.join(map(str, a.variables)))(*variables)
else:
a = Function('%s' % name + ',' +
''.join(map(str, a.variables)))(*variables)
#TODO add more, maybe all that have args
elif a.is_Add or a.is_Mul or a.is_Pow:
a = _main(a)
if is_V:
a = V(a)
a.function = func
_new.append( a )
return expr.func(*tuple(_new))
return _main(expr)
def subs2func(expr):
'''Does the following:
    Subs(Derivative(w,x(a, x, t), a), (a,), (b,)) = Vw,x(x, t, b)
'''
def _main(expr):
_new = []
for a in expr.args:
if isinstance(a, Subs):
f = a.args[0].expr
                args = tuple(i for i in f.args if i not in a.args[1])
args += a.args[2]
#TODO remove this string-based control and use a class
#TODO add a check if some other function has ^V
a = Function('V%s' % f.__class__.__name__)(*args)
#TODO add more, maybe all that have args
elif a.is_Add or a.is_Mul or a.is_Pow:
a = _main(a)
_new.append( a )
return expr.func(*tuple(_new))
return _main(expr)
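# Illustrative sketch of the naming convention used by the helpers above
# (assuming sympy symbols x, t and u = Function('u')(x, t)):
#
#   _der2func(2*u.diff(x, t))   # encodes Derivative(u(x, t), x, t) as a
#                               # Function named 'u,xt' (the differentiation
#                               # variables follow the comma in the name)
#   _func2der(...)              # inverts the encoding back to Derivative form
#   subs2func(...)              # rewrites Subs(...) nodes as 'V'-prefixed
#                               # Functions evaluated at the substituted point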
class V(Expr):
def __init__(self, expr, *args):
super(V, self).__init__(*args)
self.function = None
self.expr = expr
def __repr__(self):
return 'V' + repr(self.expr)
def __str__(self):
return self.__repr__()
class Vexpr(Expr):
__slots__ = ['functions', '_functions', 'expr',
'integrands', 'non_integrands']
def __init__(self, expr, *functions):
self._functions = []
self.integrands = {}
self.non_integrands = {}
if functions[0]=='NOTEVAL':
self.expr = expr
self.functions = functions[1:]
return
else:
self.expr = expr
self.functions = functions
self._include_variational_operator()
self._integrate_by_parts()
def __repr__(self):
return self.expr.__repr__()
def __str__(self):
return self.__repr__()
def _include_variational_operator(self):
_add = ()
expr = _der2func(self.expr)
for function in self.functions:
name = function.__class__.__name__
_function = Symbol(name.upper()*3)
derivate = _function*expr.diff(function)
self._functions.append(_function)
_add += (derivate, )
expr = Add(*_add)
expr = subs2func(expr)
self.expr = sympy.simplify(expr)
def _integrate_by_parts(self):
'''Integrates by parts changing ``Integral(a*Dw,x, x)`` into:
``Integral(-a,x*Dw) + a*Dw``
        The resulting expressions inside and outside the integral
        are stored in ``self.integrands`` and ``self.non_integrands``,
        respectively.
'''
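        # Illustrative sketch (hypothetical names): for a single term such as
        #   a(x) * V(Derivative(w(x), x))
        # one integration by parts yields the integrand -a(x).diff(x)*V(w(x))
        # plus the boundary (non-integrand) term a(x)*V(w(x)).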
def _aux_integrate_by_parts(a):
variables = None
needs_integration_by_parts = False
new = []
for i in a.args:
if isinstance(i, V):
if not variables:
needs_integration_by_parts = True
varexpr = i
der = varexpr.expr
func = der.expr
variables = der.variables
else:
raise NotImplementedError(
'Two variations in the same expression!')
else:
new.append(i)
# "integrand" with all the terms but the one with the variational
# operator
integrand = a.func(*tuple(new))
non_integrand = 0
if not needs_integration_by_parts:
return integrand, non_integrand
func_vars = list(der.variables)
for i, var in enumerate(variables):
func_vars.remove(var)
if func_vars:
new_varexpr = V(Derivative(func, *tuple(func_vars)))
new_varexpr.function = varexpr.function
non_integrand += (-1)**(i)*integrand*new_varexpr
else:
non_integrand += (-1)**(i)*integrand
integrand = (-1)*integrand.diff(var)
return integrand, non_integrand
self.integrands = {}
self.non_integrands = {}
for _function, function in zip(self._functions, self.functions):
integrands = []
non_integrands = []
d = sympy.collect(self.expr.expand(), _function, evaluate=False)
a = _func2der(d[_function])
if a.is_Add:
for b in a.args:
integrand, non_integrand = _aux_integrate_by_parts(b)
integrands.append(integrand)
non_integrands.append(non_integrand)
elif isinstance(a, Expr):
integrand, non_integrand = _aux_integrate_by_parts(a)
integrands.append(integrand)
non_integrands.append(non_integrand)
else:
print(a)
                raise ValueError('Check here, something is wrong!')
name = function.__class__.__name__
self.integrands[name] = Add(*integrands)
self.non_integrands[name] = Add(*non_integrands)
def test_simple():
sympy.var('x, y, r')
u = Function('u')(x, y)
w = Function('w')(x, y)
f = Function('f')(x, y)
e = (u.diff(x) + 1./2*w.diff(x,x)**2)*f.diff(x,y) \
+ w.diff(x,y)*f.diff(x,x)
return Vexpr(e, u, w)
def test_cylinder_clpt():
'''Test case where the functional corresponds to the internal energy of
a cylinder using the Classical Laminated Plate Theory (CLPT)
'''
from sympy import Matrix
sympy.var('x, y, r')
sympy.var('B11, B12, B16, B21, B22, B26, B61, B62, B66')
sympy.var('D11, D12, D16, D21, D22, D26, D61, D62, D66')
# displacement field
u = Function('u')(x, y)
v = Function('v')(x, y)
w = Function('w')(x, y)
# stress function
f = Function('f')(x, y)
    # laminate constitutive matrices; B represents B*, see Jones (1999)
B = Matrix([[B11, B12, B16],
[B21, B22, B26],
[B61, B62, B66]])
# D represents D*, see Jones (1999)
D = Matrix([[D11, D12, D16],
[D12, D22, D26],
[D16, D26, D66]])
    # strain-displacement equations
e = Matrix([[u.diff(x) + 1./2*w.diff(x)**2],
[v.diff(y) + 1./r*w + 1./2*w.diff(y)**2],
[u.diff(y) + v.diff(x) + w.diff(x)*w.diff(y)]])
k = Matrix([[ -w.diff(x, x)],
[ -w.diff(y, y)],
[-2*w.diff(x, y)]])
# representing the internal forces using the stress function
N = Matrix([[ f.diff(y, y)],
[ f.diff(x, x)],
[ -f.diff(x, y)]])
functional = N.T*e - N.T*B*k + 1./2*k.T*D.T*k
return Vexpr(functional, u, v, w)
if __name__ == '__main__':
    print(test_cylinder_clpt().integrands)
#TODO
# implement a class that allows N.T*Vexpr(e, u, v, w), for example
# and any other type of algebraic operations
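# Usage sketch: the test functions above double as examples, e.g.
#
#   >>> vexpr = test_simple()
#   >>> vexpr.integrands['u']       # Euler-Lagrange terms kept in the integral
#   >>> vexpr.non_integrands['w']   # boundary terms from integration by parts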
|
|
# Natural Language Toolkit: Texts
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module brings together a variety of NLTK functionality for
text analysis, and provides simple, interactive interfaces.
Functionality includes: concordancing, collocation discovery,
regular expression search over tokenized strings, and
distributional similarity.
"""
from __future__ import print_function
from math import log
from collections import defaultdict
from functools import reduce  # reduce is no longer a builtin under Python 3
import re
from nltk.probability import FreqDist, LidstoneProbDist
from nltk.probability import ConditionalFreqDist as CFD
from nltk.util import tokenwrap, LazyConcatenation
from nltk.model import NgramModel
from nltk.metrics import f_measure, BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
class ContextIndex(object):
"""
A bidirectional index between words and their 'contexts' in a text.
The context of a word is usually defined to be the words that occur
in a fixed window around the word; but other definitions may also
be used by providing a custom context function.
"""
@staticmethod
def _default_context(tokens, i):
"""One left token and one right token, normalized to lowercase"""
left = (tokens[i-1].lower() if i != 0 else '*START*')
right = (tokens[i+1].lower() if i != len(tokens) - 1 else '*END*')
return (left, right)
def __init__(self, tokens, context_func=None, filter=None, key=lambda x:x):
self._key = key
self._tokens = tokens
if context_func:
self._context_func = context_func
else:
self._context_func = self._default_context
if filter:
tokens = [t for t in tokens if filter(t)]
self._word_to_contexts = CFD((self._key(w), self._context_func(tokens, i))
for i, w in enumerate(tokens))
self._context_to_words = CFD((self._context_func(tokens, i), self._key(w))
for i, w in enumerate(tokens))
def tokens(self):
"""
:rtype: list(str)
:return: The document that this context index was
created from.
"""
return self._tokens
def word_similarity_dict(self, word):
"""
Return a dictionary mapping from words to 'similarity scores,'
indicating how often these two words occur in the same
context.
"""
word = self._key(word)
word_contexts = set(self._word_to_contexts[word])
scores = {}
for w, w_contexts in self._word_to_contexts.items():
scores[w] = f_measure(word_contexts, set(w_contexts))
return scores
def similar_words(self, word, n=20):
scores = defaultdict(int)
for c in self._word_to_contexts[self._key(word)]:
for w in self._context_to_words[c]:
if w != word:
scores[w] += self._context_to_words[c][word] * self._context_to_words[c][w]
return sorted(scores, key=scores.get)[:n]
def common_contexts(self, words, fail_on_unknown=False):
"""
Find contexts where the specified words can all appear; and
return a frequency distribution mapping each context to the
number of times that context was used.
:param words: The words used to seed the similarity search
        :type words: list(str)
:param fail_on_unknown: If true, then raise a value error if
any of the given words do not occur at all in the index.
"""
words = [self._key(w) for w in words]
contexts = [set(self._word_to_contexts[w]) for w in words]
empty = [words[i] for i in range(len(words)) if not contexts[i]]
common = reduce(set.intersection, contexts)
if empty and fail_on_unknown:
raise ValueError("The following word(s) were not found:",
" ".join(words))
elif not common:
# nothing in common -- just return an empty freqdist.
return FreqDist()
else:
fd = FreqDist(c for w in words
for c in self._word_to_contexts[w]
if c in common)
return fd
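# Example (an illustrative sketch; the token list is made up):
#
#   >>> tokens = ['the', 'cat', 'ran', 'the', 'dog', 'ran']
#   >>> index = ContextIndex(tokens, key=lambda s: s.lower())
#   >>> index.common_contexts(['cat', 'dog']).items()
#   [(('the', 'ran'), 2)]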
class ConcordanceIndex(object):
"""
An index that can be used to look up the offset locations at which
a given word occurs in a document.
"""
def __init__(self, tokens, key=lambda x:x):
"""
Construct a new concordance index.
:param tokens: The document (list of tokens) that this
concordance index was created from. This list can be used
to access the context of a given word occurrence.
:param key: A function that maps each token to a normalized
version that will be used as a key in the index. E.g., if
you use ``key=lambda s:s.lower()``, then the index will be
case-insensitive.
"""
self._tokens = tokens
"""The document (list of tokens) that this concordance index
was created from."""
self._key = key
"""Function mapping each token to an index key (or None)."""
self._offsets = defaultdict(list)
"""Dictionary mapping words (or keys) to lists of offset
indices."""
# Initialize the index (self._offsets)
for index, word in enumerate(tokens):
word = self._key(word)
self._offsets[word].append(index)
def tokens(self):
"""
:rtype: list(str)
:return: The document that this concordance index was
created from.
"""
return self._tokens
def offsets(self, word):
"""
:rtype: list(int)
:return: A list of the offset positions at which the given
word occurs. If a key function was specified for the
index, then given word's key will be looked up.
"""
word = self._key(word)
return self._offsets[word]
def __repr__(self):
return '<ConcordanceIndex for %d tokens (%d types)>' % (
len(self._tokens), len(self._offsets))
def print_concordance(self, word, width=75, lines=25):
"""
Print a concordance for ``word`` with the specified context window.
:param word: The target word
:type word: str
        :param width: The width of each line, in characters (default=75)
:type width: int
:param lines: The number of lines to display (default=25)
:type lines: int
"""
        half_width = (width - len(word) - 2) // 2
        context = width // 4  # approx number of words of context
offsets = self.offsets(word)
if offsets:
lines = min(lines, len(offsets))
print("Displaying %s of %s matches:" % (lines, len(offsets)))
for i in offsets:
if lines <= 0:
break
left = (' ' * half_width +
' '.join(self._tokens[i-context:i]))
right = ' '.join(self._tokens[i+1:i+context])
left = left[-half_width:]
right = right[:half_width]
print(left, self._tokens[i], right)
lines -= 1
else:
print("No matches")
class TokenSearcher(object):
"""
A class that makes it easier to use regular expressions to search
over tokenized strings. The tokenized string is converted to a
string where tokens are marked with angle brackets -- e.g.,
``'<the><window><is><still><open>'``. The regular expression
passed to the ``findall()`` method is modified to treat angle
brackets as nongrouping parentheses, in addition to matching the
token boundaries; and to have ``'.'`` not match the angle brackets.
"""
def __init__(self, tokens):
self._raw = ''.join('<'+w+'>' for w in tokens)
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> from nltk.text import TokenSearcher
>>> from nltk.book import text1, text5, text9
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
# preprocess the regular expression
regexp = re.sub(r'\s', '', regexp)
regexp = re.sub(r'<', '(?:<(?:', regexp)
regexp = re.sub(r'>', ')>)', regexp)
regexp = re.sub(r'(?<!\\)\.', '[^>]', regexp)
# perform the search
hits = re.findall(regexp, self._raw)
# Sanity check
for h in hits:
            if not (h.startswith('<') and h.endswith('>')):
raise ValueError('Bad regexp for TokenSearcher.findall')
# postprocess the output
hits = [h[1:-1].split('><') for h in hits]
return hits
class Text(object):
"""
A wrapper around a sequence of simple (string) tokens, which is
intended to support initial exploration of texts (via the
interactive console). Its methods perform a variety of analyses
on the text's contexts (e.g., counting, concordancing, collocation
discovery), and display the results. If you wish to write a
program which makes use of these analyses, then you should bypass
the ``Text`` class, and use the appropriate analysis function or
class directly instead.
A ``Text`` is typically initialized from a given document or
corpus. E.g.:
>>> import nltk.corpus
>>> from nltk.text import Text
>>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
"""
# This defeats lazy loading, but makes things faster. This
# *shouldn't* be necessary because the corpus view *should* be
# doing intelligent caching, but without this it's running slow.
# Look into whether the caching is working correctly.
_COPY_TOKENS = True
def __init__(self, tokens, name=None):
"""
Create a Text object.
:param tokens: The source text.
:type tokens: sequence of str
"""
if self._COPY_TOKENS:
tokens = list(tokens)
self.tokens = tokens
if name:
self.name = name
elif ']' in tokens[:20]:
end = tokens[:20].index(']')
self.name = " ".join(map(str, tokens[1:end]))
else:
self.name = " ".join(map(str, tokens[:8])) + "..."
#////////////////////////////////////////////////////////////
# Support item & slice access
#////////////////////////////////////////////////////////////
def __getitem__(self, i):
if isinstance(i, slice):
return self.tokens[i.start:i.stop]
else:
return self.tokens[i]
def __len__(self):
return len(self.tokens)
#////////////////////////////////////////////////////////////
# Interactive console methods
#////////////////////////////////////////////////////////////
def concordance(self, word, width=79, lines=25):
"""
Print a concordance for ``word`` with the specified context window.
Word matching is not case-sensitive.
:seealso: ``ConcordanceIndex``
"""
if '_concordance_index' not in self.__dict__:
print("Building index...")
self._concordance_index = ConcordanceIndex(self.tokens,
key=lambda s:s.lower())
self._concordance_index.print_concordance(word, width, lines)
def collocations(self, num=20, window_size=2):
"""
Print collocations derived from the text, ignoring stopwords.
:seealso: find_collocations
:param num: The maximum number of collocations to print.
:type num: int
:param window_size: The number of tokens spanned by a collocation (default=2)
:type window_size: int
"""
if not ('_collocations' in self.__dict__ and self._num == num and self._window_size == window_size):
self._num = num
self._window_size = window_size
print("Building collocations list")
from nltk.corpus import stopwords
ignored_words = stopwords.words('english')
finder = BigramCollocationFinder.from_words(self.tokens, window_size)
finder.apply_freq_filter(2)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
bigram_measures = BigramAssocMeasures()
self._collocations = finder.nbest(bigram_measures.likelihood_ratio, num)
colloc_strings = [w1+' '+w2 for w1, w2 in self._collocations]
print(tokenwrap(colloc_strings, separator="; "))
def count(self, word):
"""
Count the number of times this word appears in the text.
"""
return self.tokens.count(word)
def index(self, word):
"""
Find the index of the first occurrence of the word in the text.
"""
return self.tokens.index(word)
def readability(self, method):
# code from nltk_contrib.readability
raise NotImplementedError
def generate(self, length=100):
"""
Print random text, generated using a trigram language model.
:param length: The length of text to generate (default=100)
:type length: int
:seealso: NgramModel
"""
if '_trigram_model' not in self.__dict__:
print("Building ngram index...")
estimator = lambda fdist, bins: LidstoneProbDist(fdist, 0.2)
self._trigram_model = NgramModel(3, self, estimator=estimator)
text = self._trigram_model.generate(length)
print(tokenwrap(text))
def similar(self, word, num=20):
"""
Distributional similarity: find other words which appear in the
same contexts as the specified word; list most similar words first.
:param word: The word used to seed the similarity search
:type word: str
:param num: The number of words to generate (default=20)
:type num: int
:seealso: ContextIndex.similar_words()
"""
if '_word_context_index' not in self.__dict__:
print('Building word-context index...')
self._word_context_index = ContextIndex(self.tokens,
filter=lambda x:x.isalpha(),
key=lambda s:s.lower())
# words = self._word_context_index.similar_words(word, num)
word = word.lower()
wci = self._word_context_index._word_to_contexts
if word in wci.conditions():
contexts = set(wci[word])
fd = FreqDist(w for w in wci.conditions() for c in wci[w]
if c in contexts and not w == word)
words = fd.keys()[:num]
print(tokenwrap(words))
else:
print("No matches")
def common_contexts(self, words, num=20):
"""
Find contexts where the specified words appear; list
most frequent common contexts first.
        :param words: The words used to seed the similarity search
        :type words: list(str)
        :param num: The number of common contexts to display (default=20)
:type num: int
:seealso: ContextIndex.common_contexts()
"""
if '_word_context_index' not in self.__dict__:
print('Building word-context index...')
self._word_context_index = ContextIndex(self.tokens,
key=lambda s:s.lower())
try:
fd = self._word_context_index.common_contexts(words, True)
if not fd:
print("No common contexts were found")
else:
ranked_contexts = fd.keys()[:num]
print(tokenwrap(w1+"_"+w2 for w1,w2 in ranked_contexts))
except ValueError as e:
print(e)
def dispersion_plot(self, words):
"""
Produce a plot showing the distribution of the words through the text.
Requires pylab to be installed.
:param words: The words to be plotted
        :type words: list(str)
:seealso: nltk.draw.dispersion_plot()
"""
from nltk.draw import dispersion_plot
dispersion_plot(self, words)
def plot(self, *args):
"""
See documentation for FreqDist.plot()
:seealso: nltk.prob.FreqDist.plot()
"""
self.vocab().plot(*args)
def vocab(self):
"""
:seealso: nltk.prob.FreqDist
"""
if "_vocab" not in self.__dict__:
print("Building vocabulary index...")
self._vocab = FreqDist(self)
return self._vocab
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> from nltk.book import text1, text5, text9
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
if "_token_searcher" not in self.__dict__:
self._token_searcher = TokenSearcher(self)
hits = self._token_searcher.findall(regexp)
hits = [' '.join(h) for h in hits]
print(tokenwrap(hits, "; "))
#////////////////////////////////////////////////////////////
# Helper Methods
#////////////////////////////////////////////////////////////
    _CONTEXT_RE = re.compile(r'\w+|[.!?]')
def _context(self, tokens, i):
"""
One left & one right token, both case-normalized. Skip over
non-sentence-final punctuation. Used by the ``ContextIndex``
that is created for ``similar()`` and ``common_contexts()``.
"""
# Left context
j = i-1
while j>=0 and not self._CONTEXT_RE.match(tokens[j]):
j -= 1
        left = (tokens[j] if j >= 0 else '*START*')
# Right context
j = i+1
while j<len(tokens) and not self._CONTEXT_RE.match(tokens[j]):
j += 1
right = (tokens[j] if j != len(tokens) else '*END*')
return (left, right)
#////////////////////////////////////////////////////////////
# String Display
#////////////////////////////////////////////////////////////
def __repr__(self):
"""
        :return: A string representation of this Text.
:rtype: string
"""
return '<Text: %s>' % self.name
# Prototype only; this approach will be slow to load
class TextCollection(Text):
"""A collection of texts, which can be loaded with list of texts, or
with a corpus consisting of one or more texts, and which supports
counting, concordancing, collocation discovery, etc. Initialize a
TextCollection as follows:
>>> import nltk.corpus
>>> from nltk.text import TextCollection
>>> from nltk.book import text1, text2, text3
>>> gutenberg = TextCollection(nltk.corpus.gutenberg)
>>> mytexts = TextCollection([text1, text2, text3])
Iterating over a TextCollection produces all the tokens of all the
texts in order.
"""
def __init__(self, source, name=None):
if hasattr(source, 'words'): # bridge to the text corpus reader
source = [source.words(f) for f in source.fileids()]
self._texts = source
Text.__init__(self, LazyConcatenation(source))
self._idf_cache = {}
def tf(self, term, text, method=None):
""" The frequency of the term in text. """
return float(text.count(term)) / len(text)
def idf(self, term, method=None):
""" The number of texts in the corpus divided by the
number of texts that the term appears in.
If a term does not appear in the corpus, 0.0 is returned. """
# idf values are cached for performance.
idf = self._idf_cache.get(term)
if idf is None:
matches = len(list(True for text in self._texts if term in text))
# FIXME Should this raise some kind of error instead?
idf = (log(float(len(self._texts)) / matches) if matches else 0.0)
self._idf_cache[term] = idf
return idf
def tf_idf(self, term, text):
return self.tf(term, text) * self.idf(term)
def demo():
from nltk.corpus import brown
text = Text(brown.words(categories='news'))
print(text)
print()
print("Concordance:")
text.concordance('news')
print()
print("Distributionally similar words:")
text.similar('news')
print()
print("Collocations:")
text.collocations()
print()
print("Automatically generated text:")
text.generate()
print()
print("Dispersion plot:")
text.dispersion_plot(['news', 'report', 'said', 'announced'])
print()
print("Vocabulary plot:")
text.plot(50)
print()
print("Indexing:")
print("text[3]:", text[3])
print("text[3:5]:", text[3:5])
print("text.vocab()['news']:", text.vocab()['news'])
if __name__ == '__main__':
demo()
__all__ = ["ContextIndex",
"ConcordanceIndex",
"TokenSearcher",
"Text",
"TextCollection"]
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChannelWidgetUi.ui'
#
# Created: Thu Aug 14 02:38:51 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Channel(object):
def setupUi(self, Channel):
Channel.setObjectName(_fromUtf8("Channel"))
Channel.resize(303, 350)
self.gridLayout = QtGui.QGridLayout(Channel)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(1)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.frame = QtGui.QFrame(Channel)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Sunken)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.frame)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setMargin(1)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.groupBox = QtGui.QGroupBox(self.frame)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setHorizontalSpacing(2)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.lbl_limit_dbg = QtGui.QLabel(self.groupBox)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Ubuntu Mono"))
self.lbl_limit_dbg.setFont(font)
self.lbl_limit_dbg.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_limit_dbg.setObjectName(_fromUtf8("lbl_limit_dbg"))
self.gridLayout_2.addWidget(self.lbl_limit_dbg, 5, 1, 1, 1)
self.lbl_imon_dbg = QtGui.QLabel(self.groupBox)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Ubuntu Mono"))
self.lbl_imon_dbg.setFont(font)
self.lbl_imon_dbg.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_imon_dbg.setObjectName(_fromUtf8("lbl_imon_dbg"))
self.gridLayout_2.addWidget(self.lbl_imon_dbg, 5, 6, 1, 1)
self.line_2 = QtGui.QFrame(self.groupBox)
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.gridLayout_2.addWidget(self.line_2, 0, 2, 5, 1)
spacerItem = QtGui.QSpacerItem(0, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 2, 5, 1, 1)
self.line_3 = QtGui.QFrame(self.groupBox)
self.line_3.setFrameShape(QtGui.QFrame.VLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.gridLayout_2.addWidget(self.line_3, 0, 7, 5, 1)
self.lbl_set_dbg = QtGui.QLabel(self.groupBox)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Ubuntu Mono"))
self.lbl_set_dbg.setFont(font)
self.lbl_set_dbg.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_set_dbg.setObjectName(_fromUtf8("lbl_set_dbg"))
self.gridLayout_2.addWidget(self.lbl_set_dbg, 5, 3, 1, 1)
self.lbl_imon = QtGui.QLabel(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_imon.sizePolicy().hasHeightForWidth())
self.lbl_imon.setSizePolicy(sizePolicy)
self.lbl_imon.setMaximumSize(QtCore.QSize(16777215, 20))
self.lbl_imon.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_imon.setObjectName(_fromUtf8("lbl_imon"))
self.gridLayout_2.addWidget(self.lbl_imon, 0, 6, 2, 1)
self.lbl_pmon = QtGui.QLabel(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_pmon.sizePolicy().hasHeightForWidth())
self.lbl_pmon.setSizePolicy(sizePolicy)
self.lbl_pmon.setMaximumSize(QtCore.QSize(16777215, 20))
self.lbl_pmon.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_pmon.setObjectName(_fromUtf8("lbl_pmon"))
self.gridLayout_2.addWidget(self.lbl_pmon, 0, 8, 2, 1)
self.line = QtGui.QFrame(self.groupBox)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_2.addWidget(self.line, 0, 4, 5, 1)
self.lbl_set = QtGui.QLabel(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_set.sizePolicy().hasHeightForWidth())
self.lbl_set.setSizePolicy(sizePolicy)
self.lbl_set.setMaximumSize(QtCore.QSize(16777215, 20))
self.lbl_set.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_set.setObjectName(_fromUtf8("lbl_set"))
self.gridLayout_2.addWidget(self.lbl_set, 0, 3, 2, 1)
self.lbl_max = QtGui.QLabel(self.groupBox)
self.lbl_max.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_max.setObjectName(_fromUtf8("lbl_max"))
self.gridLayout_2.addWidget(self.lbl_max, 0, 1, 1, 1)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.slider_iset = QtGui.QSlider(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slider_iset.sizePolicy().hasHeightForWidth())
self.slider_iset.setSizePolicy(sizePolicy)
self.slider_iset.setMaximum(250)
self.slider_iset.setOrientation(QtCore.Qt.Vertical)
self.slider_iset.setTickPosition(QtGui.QSlider.TicksBothSides)
self.slider_iset.setTickInterval(25)
self.slider_iset.setObjectName(_fromUtf8("slider_iset"))
self.horizontalLayout_3.addWidget(self.slider_iset)
self.gridLayout_2.addLayout(self.horizontalLayout_3, 2, 3, 2, 1)
self.radio_CP = QtGui.QRadioButton(self.groupBox)
self.radio_CP.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radio_CP.sizePolicy().hasHeightForWidth())
self.radio_CP.setSizePolicy(sizePolicy)
self.radio_CP.setObjectName(_fromUtf8("radio_CP"))
self.gridLayout_2.addWidget(self.radio_CP, 4, 8, 1, 1)
self.radio_CC = QtGui.QRadioButton(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radio_CC.sizePolicy().hasHeightForWidth())
self.radio_CC.setSizePolicy(sizePolicy)
self.radio_CC.setObjectName(_fromUtf8("radio_CC"))
self.gridLayout_2.addWidget(self.radio_CC, 4, 6, 1, 1)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.slider_imax = QtGui.QSlider(self.groupBox)
self.slider_imax.setMaximum(250)
self.slider_imax.setOrientation(QtCore.Qt.Vertical)
self.slider_imax.setTickPosition(QtGui.QSlider.TicksBothSides)
self.slider_imax.setTickInterval(25)
self.slider_imax.setObjectName(_fromUtf8("slider_imax"))
self.horizontalLayout_4.addWidget(self.slider_imax)
self.gridLayout_2.addLayout(self.horizontalLayout_4, 2, 1, 1, 1)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.progbar_imon = QtGui.QProgressBar(self.groupBox)
self.progbar_imon.setMaximum(250)
self.progbar_imon.setOrientation(QtCore.Qt.Vertical)
self.progbar_imon.setObjectName(_fromUtf8("progbar_imon"))
self.horizontalLayout_5.addWidget(self.progbar_imon)
self.gridLayout_2.addLayout(self.horizontalLayout_5, 2, 6, 1, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.progbar_pmon = QtGui.QProgressBar(self.groupBox)
self.progbar_pmon.setMaximum(250)
self.progbar_pmon.setProperty("value", 0)
self.progbar_pmon.setOrientation(QtCore.Qt.Vertical)
self.progbar_pmon.setObjectName(_fromUtf8("progbar_pmon"))
self.horizontalLayout_6.addWidget(self.progbar_pmon)
self.gridLayout_2.addLayout(self.horizontalLayout_6, 2, 8, 1, 1)
self.lbl_pmon_dbg = QtGui.QLabel(self.groupBox)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Ubuntu Mono"))
self.lbl_pmon_dbg.setFont(font)
self.lbl_pmon_dbg.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_pmon_dbg.setObjectName(_fromUtf8("lbl_pmon_dbg"))
self.gridLayout_2.addWidget(self.lbl_pmon_dbg, 5, 8, 1, 1)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
spacerItem1 = QtGui.QSpacerItem(0, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 5, 1)
self.spin_max = QtGui.QSpinBox(self.groupBox)
self.spin_max.setKeyboardTracking(False)
self.spin_max.setMaximum(250)
self.spin_max.setObjectName(_fromUtf8("spin_max"))
self.gridLayout_2.addWidget(self.spin_max, 4, 1, 1, 1)
self.spin_set = QtGui.QSpinBox(self.groupBox)
self.spin_set.setKeyboardTracking(False)
self.spin_set.setMaximum(250)
self.spin_set.setObjectName(_fromUtf8("spin_set"))
self.gridLayout_2.addWidget(self.spin_set, 4, 3, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout_2)
self.horizontalLayout_2.addWidget(self.groupBox)
self.gridLayout.addWidget(self.frame, 0, 0, 1, 1)
self.retranslateUi(Channel)
QtCore.QMetaObject.connectSlotsByName(Channel)
def retranslateUi(self, Channel):
Channel.setWindowTitle(_translate("Channel", "Form", None))
self.groupBox.setTitle(_translate("Channel", "Channel [n]", None))
self.lbl_limit_dbg.setText(_translate("Channel", "0.000 mA", None))
self.lbl_imon_dbg.setText(_translate("Channel", "0.000 mA", None))
self.lbl_set_dbg.setText(_translate("Channel", "0.000 mA", None))
self.lbl_imon.setText(_translate("Channel", "<html><head/><body><p>I<span style=\" vertical-align:sub;\">mon</span></p></body></html>", None))
self.lbl_pmon.setText(_translate("Channel", "<html><head/><body><p>P<span style=\" vertical-align:sub;\">mon</span></p></body></html>", None))
self.lbl_set.setText(_translate("Channel", "<html><head/><body><p>I<span style=\" vertical-align:sub;\">Set</span></p></body></html>", None))
self.lbl_max.setText(_translate("Channel", "<html><head/><body><p>I<span style=\" vertical-align:sub;\">max</span></p></body></html>", None))
self.radio_CP.setToolTip(_translate("Channel", "Enable constant-power mode", None))
self.radio_CP.setText(_translate("Channel", "CP", None))
self.radio_CC.setToolTip(_translate("Channel", "Enable constant-current mode", None))
self.radio_CC.setText(_translate("Channel", "CC", None))
self.progbar_imon.setFormat(_translate("Channel", "%v mA", None))
self.progbar_pmon.setFormat(_translate("Channel", "%v mW", None))
        self.lbl_pmon_dbg.setText(_translate("Channel", "0.000 mW", None))
self.spin_max.setSuffix(_translate("Channel", " mA", None))
self.spin_set.setSuffix(_translate("Channel", " mA", None))
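# Typical usage of this pyuic4-generated class (a sketch; not part of the
# generated output):
#
#   import sys
#   app = QtGui.QApplication(sys.argv)
#   widget = QtGui.QWidget()
#   ui = Ui_Channel()
#   ui.setupUi(widget)
#   widget.show()
#   sys.exit(app.exec_())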
|
|
"""
Basic support for reading the AFNI BRIK/HEAD file format.
This is quite quick-and-dirty, so please use with caution.
Floris van Vugt, April 2017
"""
import re
import numpy as np
import os
def read_header(fname):
"""
Reads AFNI header file. That is, reads the HEAD from a BRIK/HEAD file pair.
Arguments
fname : the filename to be read (if it doesn't end in .HEAD this will be appended)
Returns
dict of key-values representing the header contents
"""
if fname.endswith('.'):
fname+="HEAD"
if not fname.endswith('.HEAD'):
fname+=".HEAD"
# Read the file contents
    with open(fname, 'r') as headfile:
        headf = headfile.read()
# Now parse the contents
remainder = headf[:]
# No, this is not an insult, it's a pattern that matches the beginning of a chunk,
# i.e. type=something, name=something_else, count=a_number
chunkhead = re.compile(r'type\s*=\s*(integer|float|string)-attribute\s*name\s*=\s*(\w+)\s*count\s*=\s*(\d+)')
header = {}
types = {}
m = re.search(chunkhead,remainder)
type_regexp = {
"integer":"[+-]?[0-9]",
"float":"[-+]?(\d+([.,]\d*)?|[.,]\d+)([eE][-+]?\d+)?" # source: http://stackoverflow.com/questions/4703390/how-to-extract-a-floating-number-from-a-string
}
# While there is another chunk to be found...
while m:
# Parse the type/name/count fields
endpos = m.end()
(tp,name,count) = m.groups()
count = int(count)
header[name]=count
types[name]=tp
#print(m.start(),tp,name,count)
# The rest of the file...
remainder = remainder[endpos:]
# Now read the actual contents
if tp=='integer' or tp=="float":
# Set up a regexp that will capture the next "count" ints
contents = re.match(r'(\s*(%s)+){%i}'%(type_regexp[tp],count),remainder)
cast = int if tp=="integer" else float
if contents:
values = [ cast(i) for i in contents.group().split() ]
header[name]=values if count>1 else values[0]
else:
raise ValueError("Failed to parse contents for %s"%name)
remainder = remainder[contents.end():]
elif tp=="string":
contents = re.match(r'\s*\'(.{%i})~'%(count-1),remainder,re.DOTALL)
if contents:
header[name]=contents.group(1)
else:
raise ValueError("Failed to parse contents for %s"%name)
remainder = remainder[contents.end():]
else:
raise ValueError("Unknown data type %s for %s"%(tp,name))
# Set up for the next iteration of the loop
m = re.search(chunkhead,remainder)
return header #,types
def read_brik(fname,header):
""" Reads BRIK file. Presupposes that we have parsed the .HEAD file and
supplied at least the relevant portion of it.
Arguments
fname : the filename to be read (.BRIK will be added if necessary)
header : the header information, a set of key-values
Returns
array containing the data
"""
if fname.endswith('.'):
fname+="BRIK"
if not fname.endswith('.BRIK'):
fname+=".BRIK"
# Determine the size of the data to be read (this comes from the header)
nx,ny,nz=header["DATASET_DIMENSIONS"][0],header["DATASET_DIMENSIONS"][1],header["DATASET_DIMENSIONS"][2]
ntp = header["DATASET_RANK"][1]
n = nx*ny*nz*ntp #(Info.DATASET_DIMENSIONS(1) .* Info.DATASET_DIMENSIONS(2) .* Info.DATASET_DIMENSIONS(3) .* Info.DATASET_RANK(2)
# Determine the datum type stored at each brick
bt = header.get("BRICK_TYPES",None)
# Check whether the brick data type is a list, in which case the different bricks could have
# different data types, which we are too lazy here to support.
if (type(bt) is list) or (type(bt) is tuple):
if len(bt)>1:
# If this is a list of data types but they are really all the same data type, then it's still okay.
if len(list(set(bt)))==1:
bt = bt[0]
else:
raise ValueError("Error: currently not supporting reading bricks with different data types.")
else:
bt = bt[0]
if bt==0:
dt = "B" # not tested; 0 = byte (unsigned char; 1 byte)
elif bt==1:
dt = "h" # tested; 1 = short (2 bytes, signed)
elif bt==2:
dt = "i" # not tested; 2 = int
elif bt==3:
dt = "f" # not tested; 3 = float (4 bytes, assumed to be IEEE format)
elif bt==4:
dt = "d" # not tested; 4 = double
elif bt==5:
dt = "D" # not tested 5 = complex (8 bytes: real+imaginary parts)
else:
raise ValueError("Unknown data type (BRICK_TYPES=%i)"%bt)
bo = header.get("BYTEORDER_STRING",None)
if bo == "LSB_FIRST": # "<" means little-endian (LSB first)
bo_str = "<"
elif bo == "MSB_FIRST": # ">" means big-endian (MSB first)
bo_str = ">"
else:
bo_str = "="
dt = np.dtype(bo_str+dt)
# Check that the file is indeed of the correct size
fsize = os.path.getsize(fname)
if fsize!=dt.itemsize*n:
raise ValueError("Error reading BRIK file, file size is %i but I expected to read %i voxels."%(fsize,n))
V = np.fromfile(fname, dtype=np.dtype(dt),count=n)
#V = fread(fidBRIK, n) , [Opt.OutPrecision,typestr]);
# For reshaping, the AFNI doc says:
# The voxel with 3-index (i,j,k) in a sub-brick
# is located at position (i+j*nx+k*nx*ny), for
# i=0..nx-1, j=0..ny-1, k=0..nz-1. Each axis must
# have at least 2 points!
# I think this corresponds to what Numpy calls Fortran-style ordering.
V = np.reshape( V, (nx,ny,nz,ntp), order="F" )
# Potentially we need to apply factors to the data (but be careful of overflows!)
ff = header.get("BRICK_FLOAT_FACS",[])
if (type(ff) is list) or (type(ff) is tuple):
for (i,fact) in enumerate(ff):
if i>=ntp:
raise ValueError("Error: header defines BRICK_FLOAT_FACS for nonexistant time point.")
if fact>0: # According to the AFNI specification, fact is non-negative
V[:,:,:,i] = fact*V[:,:,:,i]
else: # If FLOAT_FACS is not a list, then simply apply it to all volumes
if ff>0:
V = ff*V
return V
def read(fname):
""" Read AFNI data file (BRIK/HEAD).
Arguments
fname : the filename to be read
Returns
(header,brik)
header : a dict containing the header information
brik : a multidimensional array containing the voxel data
"""
header = read_header(fname)
brik = read_brik(fname,header)
return (header,brik)
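# Example usage (a sketch; 'subject01+orig' is a hypothetical dataset prefix --
# read() appends the .HEAD/.BRIK extensions as needed):
#
#   header, brik = read('subject01+orig.')
#   print(brik.shape)   # (nx, ny, nz, ntp), voxels in Fortran order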
|
|
#!/usr/bin/env python
# a base class other test classes inherit from - some shared functionality
import unittest
import string
import types
import ir
import fracttypes
class TestBase(unittest.TestCase):
def assertNearlyEqual(self,a,b,epsilon=1.0e-12):
# check that each element is within epsilon of expected value
for (ra,rb) in zip(a,b):
if isinstance(ra, types.ListType) or isinstance(ra, types.TupleType):
for (ca,cb) in zip(ra,rb):
d = abs(ca-cb)
self.failUnless(d < epsilon, "%s - %s = %s, > %s" % (ca,cb,d,epsilon))
else:
d = abs(ra-rb)
self.failUnless(d < epsilon, "%s - %s = %s, > %s" % (ra,rb,d,epsilon))
def assertError(self,t,str):
self.assertNotEqual(len(t.errors),0)
for e in t.errors:
if string.find(e,str) != -1:
return
self.fail(("No error matching '%s' raised, errors were %s" % (str, t.errors)))
def assertWarning(self,t,str):
self.assertNotEqual(len(t.warnings),0)
for e in t.warnings:
if string.find(e,str) != -1:
return
self.fail(("No warning matching '%s' raised, warnings were %s" % (str, t.warnings)))
def assertNoErrors(self,t, info=""):
self.assertEqual(len(t.errors),0,
"Unexpected errors %s in %s" % (t.errors, info))
for (name, item) in t.canon_sections.items():
for stm in item:
#print stm.pretty()
self.assertESeqsNotNested(stm,1)
self.assertValidTrace(item)
self.assertWellTyped(t)
def assertValidTrace(self,trace):
# must have each cjump followed by false case
expecting = None
for stm in trace:
if expecting != None:
self.failUnless(isinstance(stm,ir.Label))
self.assertEqual(stm.name,expecting)
expecting = None
elif isinstance(stm, ir.CJump):
expecting = stm.falseDest
def assertPixelIs(self,img,x,y,fates,outcolor=None,incolor=None,efate=None):
self.assertEqual(img.get_all_fates(x,y), fates)
(r,g,b) = (0,0,0)
nsubpixels = 0
for i in xrange(img.FATE_SIZE):
fate = fates[i]
if fate==img.UNKNOWN and efate != None:
fate = efate
if fate == img.OUT:
if outcolor == None:
color = img.WHITE
else:
color = outcolor
else:
if incolor == None:
color = img.BLACK
else:
color = incolor
if fate == img.IN:
index = 0.0
elif fate == img.OUT:
index = 0.0
else:
continue
r += color[0]; g += color[1]; b += color[2]
nsubpixels += 1
if fate != img.UNKNOWN and efate==None:
findex = img.get_color_index(x,y,i)
self.assertEqual(
findex,index,
"unexpected index %.17f for subpixel %d with fate %d" % (findex,i,fate))
color = [r//nsubpixels, g//nsubpixels, b//nsubpixels]
self.assertEqual(img.get_color(x,y),color)
def assertNoProbs(self, t):
self.assertEqual(len(t.warnings),0,
"Unexpected warnings %s" % t.warnings)
self.assertNoErrors(t)
def assertVar(self,t, name,type):
self.assertEquals(t.symbols[name].type,type)
def assertNode(self,name,n):
self.failUnless(isinstance(n,ir.T), ("%s(%s) is not a node" % (n, name)))
def assertTreesEqual(self, name, t1, t2):
if isinstance(t1,types.ListType):
# canonicalized trees are a list, not a Seq()
for (s1,s2) in zip(t1,t2):
self.assertNode(name,s1)
self.assertNode(name,s2)
self.failUnless(
s1.pretty() == s2.pretty(),
("%s, %s should be equivalent (section %s)" %
(s1.pretty(), s2.pretty(), name)))
else:
self.assertNode(name,t1)
self.assertNode(name,t2)
self.failUnless(
t1.pretty() == t2.pretty(),
("%s, %s should be equivalent" % (t1.pretty(), t2.pretty())))
def assertEquivalentTranslations(self,t1,t2):
for (k,item) in t1.sections.items():
self.assertTreesEqual(k,item,t2.sections[k])
for (k,item) in t2.sections.items():
self.assertTreesEqual(k,t1.sections[k], item)
def assertFuncOnList(self,f,nodes,types):
self.assertEqual(len(nodes),len(types))
for (n,t) in zip(nodes,types):
self.failUnless(f(n,t))
def assertESeqsNotNested(self,t,parentAllowsESeq):
'check that no ESeqs are left below other nodes'
if isinstance(t,ir.ESeq):
if parentAllowsESeq:
for child in t.children:
self.assertESeqsNotNested(child,0)
else:
self.fail("tree not well-formed after linearize")
else:
for child in t.children:
self.assertESeqsNotNested(child,0)
def assertJumpsAndLabs(self,t,expected):
jumps_and_labs = []
for n in t.sections["loop"]:
if isinstance(n,ir.Jump):
jumps_and_labs.append("J:%s" % n.dest)
elif isinstance(n,ir.CJump):
jumps_and_labs.append("CJ:%s,%s" % (n.trueDest, n.falseDest))
elif isinstance(n,ir.Label):
jumps_and_labs.append("L:%s" % n.name)
self.assertEqual(jumps_and_labs, expected)
def assertJumpsMatchLabs(self,t):
'check that each jump has a corresponding label somewhere'
jumpTargets = {}
jumpLabels = {}
for n in t:
if isinstance(n,ir.Jump):
jumpTargets[n.dest] = 1
elif isinstance(n,ir.CJump):
jumpTargets[n.trueDest] = jumpTargets[n.falseDest] = 1
elif isinstance(n,ir.Label):
jumpLabels[n.name] = 1
for target in jumpTargets.keys():
self.failUnless(jumpLabels.has_key(target),
"jump to unknown target %s" % target )
def assertBlocksAreWellFormed(self,blocks):
for b in blocks:
self.assertBlockIsWellFormed(b)
def assertBlockIsWellFormed(self,block,startLabel=None, endLabel=None):
self.assertStartsWithLabel(block,startLabel)
self.assertEndsWithJump(block,endLabel)
for stm in block[1:-1]:
if isinstance(stm,ir.Jump) or \
isinstance(stm,ir.CJump) or \
isinstance(stm,ir.Label):
self.fail("%s not allowed mid-basic-block", stm.pretty())
def assertStartsWithLabel(self, block, name=None):
self.failUnless(isinstance(block[0], ir.Label))
if name != None:
self.assertEqual(block[0].name, name)
def assertEndsWithJump(self,block, name=None):
self.failUnless(isinstance(block[-1], ir.Jump) or \
isinstance(block[-1], ir.CJump))
if name != None:
self.assertEqual(block[-1].dest, name)
def assertWellTyped(self,t):
for (key,s) in t.sections.items():
for node in s:
if isinstance(node,ir.T):
ob = node
dt = node.datatype
elif isinstance(node,types.StringType):
try:
sym = t.symbols[node]
except KeyError, err:
self.fail("%s not a symbol in %s" % (node, s.pretty()))
self.failUnless(isinstance(sym,fracttypes.Var),
"weird symbol %s : %s(%s)" %
(node, sym, sym.__class__.__name__))
ob = sym
dt = ob.type
else:
self.fail("%s(%s) not an ir Node" % (node, node.__class__.__name__))
if isinstance(ob,ir.Stm):
self.assertEqual(dt,None,"bad type %s for %s" % (dt, ob))
else:
self.failUnless(dt in fracttypes.typeList,
"bad type %s for %s" % (dt, ob))
|
|
"""
The channel handler, accessed from this module as CHANNEL_HANDLER is a
singleton that handles the stored set of channels and how they are
represented against the cmdhandler.
If there is a channel named 'newbie', we want to be able to just write
newbie Hello!
For this to work, 'newbie', the name of the channel, must be
identified by the cmdhandler as a command name. The channelhandler
stores all channels as custom 'commands' that the cmdhandler can
import and look through.
> Warning - channel names take precedence over command names, so make
sure to not pick clashing channel names.
Unless deleting a channel you normally don't need to bother about the
channelhandler at all - the create_channel method handles the update.
To delete a channel cleanly, delete the channel object, then call
update() on the channelhandler. Or use Channel.objects.delete() which
does this for you.
"""
from builtins import object
from evennia.comms.models import ChannelDB
from evennia.commands import cmdset, command
from django.utils.translation import ugettext as _
class ChannelCommand(command.Command):
"""
Channel
Usage:
<channel name or alias> <message>
This is a channel. If you have subscribed to it, you can send to
it by entering its name or alias, followed by the text you want to
send.
"""
# this flag is what identifies this cmd as a channel cmd
# and branches off to the system send-to-channel command
# (which is customizable by admin)
is_channel = True
key = "general"
help_category = "Channel Names"
obj = None
def parse(self):
"""
Simple parser
"""
# cmdhandler sends channame:msg here.
channelname, msg = self.args.split(":", 1)
self.args = (channelname.strip(), msg.strip())
def func(self):
"""
Create a new message and send it to channel, using
the already formatted input.
"""
channelkey, msg = self.args
caller = self.caller
if not msg:
self.msg(_("Say what?"))
return
channel = ChannelDB.objects.get_channel(channelkey)
if not channel:
self.msg(_("Channel '%s' not found.") % channelkey)
return
if not channel.has_connection(caller):
string = _("You are not connected to channel '%s'.")
self.msg(string % channelkey)
return
if not channel.access(caller, 'send'):
string = _("You are not permitted to send to channel '%s'.")
self.msg(string % channelkey)
return
channel.msg(msg, senders=self.caller, online=True)
def get_extra_info(self, caller, **kwargs):
"""
Let users know that this command is for communicating on a channel.
Args:
caller (TypedObject): A Character or Player who has entered an ambiguous command.
Returns:
A string with identifying information to disambiguate the object, conventionally with a preceding space.
"""
return _(" (channel)")
class ChannelHandler(object):
"""
The ChannelHandler manages all active in-game channels and
dynamically creates channel commands for users so that they can
    just give the channel's key or alias to write to it. Whenever a
new channel is created in the database, the update() method on
this handler must be called to sync it with the database (this is
done automatically if creating the channel with
evennia.create_channel())
"""
def __init__(self):
"""
Initializes the channel handler's internal state.
"""
self.cached_channel_cmds = []
self.cached_cmdsets = {}
def __str__(self):
"""
Returns the string representation of the handler
"""
return ", ".join(str(cmd) for cmd in self.cached_channel_cmds)
def clear(self):
"""
Reset the cache storage.
"""
self.cached_channel_cmds = []
def _format_help(self, channel):
"""
Builds an automatic doc string for the channel.
Args:
channel (Channel): Source of help info.
Returns:
doc (str): The docstring for the channel.
"""
key = channel.key
aliases = channel.aliases.all()
ustring = _("%s <message>") % key.lower() + "".join([_("\n %s <message>") % alias.lower() for alias in aliases])
desc = channel.db.desc
string = _(
"""
Channel '%s'
Usage (not including your personal aliases):
%s
%s
""") % (key, ustring, desc)
return string
def add_channel(self, channel):
"""
Add an individual channel to the handler. This should be
called whenever a new channel is created.
Args:
channel (Channel): The channel to add.
Notes:
To remove a channel, simply delete the channel object and
run self.update on the handler. This should usually be
            handled automatically by one of the deletion methods of
the Channel itself.
"""
# map the channel to a searchable command
cmd = ChannelCommand(key=channel.key.strip().lower(),
aliases=channel.aliases.all(),
locks="cmd:all();%s" % channel.locks,
help_category="Channel names",
obj=channel,
arg_regex=r"\s.*?",
is_channel=True)
self.cached_channel_cmds.append(cmd)
self.cached_cmdsets = {}
def update(self):
"""
Updates the handler completely, including removing old removed
Channel objects. This must be called after deleting a Channel.
"""
self.cached_channel_cmds = []
self.cached_cmdsets = {}
for channel in ChannelDB.objects.get_all_channels():
self.add_channel(channel)
def get_cmdset(self, source_object):
"""
Retrieve cmdset for channels this source_object has
access to send to.
Args:
source_object (Object): An object subscribing to one
or more channels.
Returns:
cmdsets (list): The Channel-Cmdsets `source_object` has
access to.
"""
if source_object in self.cached_cmdsets:
return self.cached_cmdsets[source_object]
else:
# create a new cmdset holding all channels
chan_cmdset = cmdset.CmdSet()
chan_cmdset.key = '_channelset'
chan_cmdset.priority = 120
chan_cmdset.duplicates = True
for cmd in [cmd for cmd in self.cached_channel_cmds
if cmd.access(source_object, 'send')]:
chan_cmdset.add(cmd)
self.cached_cmdsets[source_object] = chan_cmdset
return chan_cmdset
CHANNEL_HANDLER = ChannelHandler()
CHANNELHANDLER = CHANNEL_HANDLER # legacy
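# Usage sketch (per the module docstring above): after deleting a Channel
# object directly, re-sync the handler so its stale command disappears.
#
#   some_channel.delete()
#   CHANNEL_HANDLER.update()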
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.codegen.register import build_file_aliases as register_codegen
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.project_info.tasks.filedeps import FileDeps
from pants.build_graph.register import build_file_aliases as register_core
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class FileDepsTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return register_core().merge(register_jvm()).merge(register_codegen())
@classmethod
def task_type(cls):
return FileDeps
def setUp(self):
super(FileDepsTest, self).setUp()
self.context(options={
'scala-platform': {
'runtime': ['tools:scala-library']
}
})
# TODO(John Sirois): Rationalize much of this target emission setup. Lots of tests do similar
# things: https://github.com/pantsbuild/pants/issues/525
def create_target(path, definition, sources=None):
if sources:
self.create_files(path, sources)
self.add_to_build_file(path, definition)
create_target(path='src/scala/core',
definition=dedent("""
scala_library(
name='core',
sources=[
'core1.scala'
],
java_sources=[
'src/java/core'
]
)
"""),
sources=['core1.scala'])
create_target(path='src/java/core',
definition=dedent("""
java_library(
name='core',
sources=globs(
'core*.java',
),
dependencies=[
'src/scala/core'
]
)
"""),
sources=['core1.java', 'core2.java'])
create_target(path='src/resources/lib',
definition=dedent("""
resources(
name='lib',
sources=globs('*.json')
)
"""),
sources=['data.json'])
create_target(path='src/thrift/storage',
definition=dedent("""
java_thrift_library(
name='storage',
sources=[
'data_types.thrift'
]
)
"""),
sources=['src/thrift/storage/data_types.thrift'])
create_target(path='src/java/lib',
definition=dedent("""
java_library(
name='lib',
sources=[
'lib1.java'
],
dependencies=[
'src/scala/core',
'src/thrift/storage'
],
resources=[
'src/resources/lib'
]
)
"""),
sources=['lib1.java'])
# Derive a synthetic target from the src/thrift/storage thrift target as-if doing code-gen.
self.create_file('.pants.d/gen/thrift/java/storage/Angle.java')
self.make_target(spec='.pants.d/gen/thrift/java/storage',
target_type=JavaLibrary,
derived_from=self.target('src/thrift/storage'),
sources=['Angle.java'])
synthetic_java_lib = self.target('.pants.d/gen/thrift/java/storage')
java_lib = self.target('src/java/lib')
java_lib.inject_dependency(synthetic_java_lib.address)
create_target(path='src/java/bin',
definition=dedent("""
jvm_binary(
name='bin',
source='main.java',
main='bin.Main',
dependencies=[
'src/java/lib'
]
)
"""),
sources=['main.java'])
create_target(path='project',
definition=dedent("""
jvm_app(
name='app',
binary='src/java/bin',
bundles=[
bundle(fileset=['config/app.yaml'])
]
)
"""),
sources=['config/app.yaml'])
def test_resources(self):
self.assert_console_output(
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
targets=[self.target('src/resources/lib')]
)
def test_globs(self):
self.assert_console_output(
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core*.java',
targets=[self.target('src/scala/core')],
options=dict(globs=True),
)
def test_globs_app(self):
self.assert_console_output(
'project/config/app.yaml',
'project/BUILD',
'src/java/bin/BUILD',
'src/java/core/BUILD',
'src/java/bin/main.java',
'src/java/core/core*.java',
'src/java/lib/BUILD',
'src/java/lib/lib1.java',
'src/resources/lib/*.json',
'src/resources/lib/BUILD',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/thrift/storage/BUILD',
'src/thrift/storage/data_types.thrift',
targets=[self.target('project:app')],
options=dict(globs=True),
)
def test_scala_java_cycle_scala_end(self):
self.assert_console_output(
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/scala/core')]
)
def test_scala_java_cycle_java_end(self):
self.assert_console_output(
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/java/core')]
)
def test_concrete_only(self):
self.assert_console_output(
'src/java/lib/BUILD',
'src/java/lib/lib1.java',
'src/thrift/storage/BUILD',
'src/thrift/storage/data_types.thrift',
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/java/lib')]
)
def test_jvm_app(self):
self.assert_console_output(
'project/BUILD',
'project/config/app.yaml',
'src/java/bin/BUILD',
'src/java/bin/main.java',
'src/java/lib/BUILD',
'src/java/lib/lib1.java',
'src/thrift/storage/BUILD',
'src/thrift/storage/data_types.thrift',
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('project:app')]
)
def assert_console_output(self, *paths, **kwargs):
abs_paths = [os.path.join(self.build_root, path) for path in paths]
super(FileDepsTest, self).assert_console_output(*abs_paths, **kwargs)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
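# A minimal self-check sketch (illustrative values, not part of the original
# tests): one step of the numpy reference implementation above with default
# hyperparameters.
def _demo_adam_update_numpy():
    param = np.array([1.0, 2.0])
    grad = np.array([0.1, 0.1])
    m = np.zeros_like(param)
    v = np.zeros_like(param)
    new_param, new_m, new_v = adam_update_numpy(param, grad, 0, m, v)
    # With m = v = 0, the bias-corrected first step has magnitude close to
    # lr (0.001) regardless of the gradient scale.
    return new_param, new_m, new_v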
def adam_update_numpy_amsgrad(param,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
vhat_t = np.maximum(vhat, v_t)
param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
return param_t, m_t, v_t, vhat_t
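# Illustrative sketch (not part of the original tests): AMSGrad differs from
# plain Adam only in dividing by the running maximum vhat instead of v, so a
# decaying second moment cannot re-inflate the step size.
def _demo_amsgrad_vhat_holds_peak():
    param, m, v, vhat = np.array([1.0]), 0.0, 0.0, 0.0
    for g in [1.0, 0.0]:  # a large gradient followed by a zero gradient
        param, m, v, vhat = adam_update_numpy_amsgrad(param, g, 0, m, v, vhat)
    assert vhat > v  # v decays after the zero gradient; vhat keeps its peak
    return param, v, vhat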
def adam_sparse_update_numpy_amsgrad(param,
indices,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat),
np.copy(param))
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
v_hat_t = np.maximum(vhat_t, v_t)
v_hat_t_slice = v_hat_t[indices]
param_t_slice = param[indices] - (
lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon)))
param_t[indices] = param_t_slice
return param_t, m_t, v_t, vhat_t
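# Illustrative sketch (not part of the original tests): the sparse variant
# above only updates the rows selected by `indices`; all other rows of the
# returned param/m/v arrays are unchanged copies.
def _demo_sparse_amsgrad_touches_only_indices():
    param = np.array([[1.0], [2.0]])
    m = np.zeros_like(param)
    v = np.zeros_like(param)
    vhat = np.zeros_like(param)
    grad = np.array([[0.5]])  # gradient only for row 1
    new_param, _, _, _ = adam_sparse_update_numpy_amsgrad(
        param, np.array([1]), grad, 0, m, v, vhat)
    assert new_param[0] == param[0]  # row 0 untouched
    assert new_param[1] != param[1]  # row 1 updated
    return new_param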
def get_beta_accumulators(opt, dtype):
local_step = math_ops.cast(opt.iterations + 1, dtype)
beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = math_ops.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.Adam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [dtypes.int32, dtypes.int64]:
with ops.Graph().as_default(), self.cached_session(
force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.Adam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.Adam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testResourceBasic(self):
self.doTestBasic()
@combinations.generate(combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.Adam(amsgrad=True)
opt_aggregated = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.Adam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.Adam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
opt = adam.Adam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be the iterations variable, plus two unique slot variables
# (m and v) for each of v1 and v2, for five variables in total.
self.assertLen(set(v.ref() for v in opt.variables()), 5)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizer_v1.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration))
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.NonFusedAdam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [dtypes.int32, dtypes.int64]:
with ops.Graph().as_default(), self.cached_session(
force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.NonFusedAdam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.NonFusedAdam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.NonFusedAdam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.NonFusedAdam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testResourceBasic(self):
self.doTestBasic()
@combinations.generate(combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.NonFusedAdam(amsgrad=True)
opt_aggregated = adam.NonFusedAdam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.NonFusedAdam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.NonFusedAdam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined NonFusedAdam1 and NonFusedAdam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Qubole hook"""
import datetime
import logging
import os
import pathlib
import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from qds_sdk.commands import (
Command,
DbExportCommand,
DbImportCommand,
DbTapQueryCommand,
HadoopCommand,
HiveCommand,
JupyterNotebookCommand,
PigCommand,
PrestoCommand,
ShellCommand,
SparkCommand,
SqlCommand,
)
from qds_sdk.qubole import Qubole
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.utils.context import Context
log = logging.getLogger(__name__)
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand,
"sqlcmd": SqlCommand,
"jupytercmd": JupyterNotebookCommand,
}
POSITIONAL_ARGS = {'hadoopcmd': ['sub_command'], 'shellcmd': ['parameters'], 'pigcmd': ['parameters']}
def flatten_list(list_of_lists) -> list:
"""Flatten the list"""
return [element for array in list_of_lists for element in array]
def filter_options(options: list) -> list:
"""Remove options from the list"""
options_to_remove = ["help", "print-logs-live", "print-logs", "pool"]
return [option for option in options if option not in options_to_remove]
def get_options_list(command_class) -> list:
"""Get options list"""
options_list = [option.get_opt_string().strip("--") for option in command_class.optparser.option_list]
return filter_options(options_list)
def build_command_args() -> Tuple[Dict[str, list], list]:
"""Build Command argument from command and options"""
command_args, hyphen_args = {}, set()
for cmd in COMMAND_CLASSES:
# get all available options from the class
opts_list = get_options_list(COMMAND_CLASSES[cmd])
# append positional args if any for the command
if cmd in POSITIONAL_ARGS:
opts_list += POSITIONAL_ARGS[cmd]
# get args with a hyphen and replace them with underscore
for index, opt in enumerate(opts_list):
if "-" in opt:
opts_list[index] = opt.replace("-", "_")
hyphen_args.add(opts_list[index])
command_args[cmd] = opts_list
return command_args, list(hyphen_args)
COMMAND_ARGS, HYPHEN_ARGS = build_command_args()
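# Illustrative sketch (not part of the original hook): how the tables built
# above are meant to be read. Hyphenated SDK options are stored with
# underscores so they are valid Python keyword arguments; HYPHEN_ARGS records
# which ones must be converted back to hyphenated flags in create_cmd_args.
def _demo_command_args_tables(cmd_type='shellcmd'):
    valid_kwargs = COMMAND_ARGS[cmd_type]  # kwargs accepted for this command type
    needs_hyphen = [arg for arg in valid_kwargs if arg in HYPHEN_ARGS]
    return valid_kwargs, needs_hyphen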
class QuboleHook(BaseHook):
"""Hook for Qubole communication"""
conn_name_attr = 'qubole_conn_id'
default_conn_name = 'qubole_default'
conn_type = 'qubole'
hook_name = 'Qubole'
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['login', 'schema', 'port', 'extra'],
"relabeling": {
'host': 'API Endpoint',
'password': 'Auth Token',
},
"placeholders": {'host': 'https://<env>.qubole.com/api'},
}
def __init__(self, *args, **kwargs) -> None:
super().__init__()
conn = self.get_connection(kwargs.get('qubole_conn_id', self.default_conn_name))
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd: Optional[Command] = None
self.task_instance = None
@staticmethod
def handle_failure_retry(context) -> None:
"""Handle retries in case of failures"""
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
if cmd.status == 'done':
log.info('Command ID: %s has succeeded, hence marking this TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context: 'Context') -> None:
"""Execute call"""
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
self.task_instance = context['task_instance']
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id) # type: ignore[attr-defined]
self.log.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, # type: ignore[attr-defined]
self.cmd.status, # type: ignore[attr-defined]
)
while not Command.is_done(self.cmd.status): # type: ignore[attr-defined]
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id) # type: ignore[attr-defined]
self.log.info(
"Command Id: %s and Status: %s", self.cmd.id, self.cmd.status # type: ignore[attr-defined]
)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.log.info(
"Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log() # type: ignore[attr-defined]
)
if self.cmd.status != 'done': # type: ignore[attr-defined]
raise AirflowException(
'Command Id: {} failed with Status: {}'.format(
self.cmd.id, self.cmd.status # type: ignore[attr-defined]
)
)
def kill(self, ti):
"""
Kill (cancel) a Qubole command
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(
self,
ti=None,
fp=None,
inline: bool = True,
delim=None,
fetch: bool = True,
include_headers: bool = False,
) -> str:
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:param include_headers: whether to include column headers in the fetched results, when inline is True
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(conf.get('logging', 'BASE_LOG_FOLDER'))
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
pathlib.Path(resultpath).mkdir(parents=True, exist_ok=True)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
include_headers_str = 'true' if include_headers else 'false'
self.cmd.get_results(
fp, inline, delim, fetch, arguments=[include_headers_str]
) # type: ignore[attr-defined]
fp.flush()
fp.close()
return fp.name
def get_log(self, ti) -> None:
"""
Get Logs of a command from Qubole
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(cmd_id)
def get_jobs_id(self, ti) -> None:
"""
Get jobs associated with a Qubole commands
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: Job information associated with command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(cmd_id)
def create_cmd_args(self, context) -> List[str]:
"""Creates command arguments"""
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = {self.dag_id, self.task_id, context['run_id']}
positional_args_list = flatten_list(POSITIONAL_ARGS.values())
for key, value in self.kwargs.items():
if key in COMMAND_ARGS[cmd_type]:
if key in HYPHEN_ARGS:
args.append(f"--{key.replace('_', '-')}={value}")
elif key in positional_args_list:
inplace_args = value
elif key == 'tags':
self._add_tags(tags, value)
elif key == 'notify':
if value is True:
args.append("--notify")
else:
args.append(f"--{key}={value}")
args.append(f"--tags={','.join(filter(None, tags))}")
if inplace_args is not None:
args += inplace_args.split(' ')
return args
@staticmethod
def _add_tags(tags, value) -> None:
if isinstance(value, str):
tags.add(value)
elif isinstance(value, (list, tuple)):
tags.update(value)
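# Minimal usage sketch, assuming an existing 'qubole_default' connection and
# hypothetical `dag`/`context` objects from Airflow; the hook is normally
# driven by QuboleOperator rather than called directly.
def _demo_qubole_hook(dag, context):
    hook = QuboleHook(task_id='run_hive', dag=dag, command_type='hivecmd',
                      query='show tables', qubole_conn_id='qubole_default')
    hook.execute(context)  # creates the command and polls until it is done
    return hook.get_results(ti=context['ti'])  # path of the downloaded results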
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, List, TYPE_CHECKING
)
from ._shared import sign_string
from ._shared.constants import X_MS_VERSION
from ._shared.models import Services
from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants
from ._shared.parser import _str
if TYPE_CHECKING:
from datetime import datetime
from azure.storage.fileshare import (
ResourceTypes,
AccountSasPermissions,
ShareSasPermissions,
FileSasPermissions
)
class FileSharedAccessSignature(SharedAccessSignature):
'''
Provides a factory for creating file and share access
signature tokens with a common account name and account key. Users can
either use the factory or construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
'''
super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
def generate_file(self, share_name, directory_name=None, file_name=None,
permission=None, expiry=None, start=None, policy_id=None,
ip=None, protocol=None, cache_control=None,
content_disposition=None, content_encoding=None,
content_language=None, content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered rcwd.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or FileSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str policy_id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = share_name
if directory_name is not None:
    resource_path += '/' + _str(directory_name)
if file_name is not None:
    resource_path += '/' + _str(file_name)
sas = _FileSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(policy_id)
sas.add_resource('f')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, resource_path)
return sas.get_token()
def generate_share(self, share_name, permission=None, expiry=None,
start=None, policy_id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the share.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered rcwdl.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ShareSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str policy_id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
            set_share_acl.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
            Specifies the protocol permitted for a request made with the SAS
            token. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _FileSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(policy_id)
sas.add_resource('s')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, share_name)
return sas.get_token()
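# A minimal usage sketch for the signature generator above (assumption: the
# class is constructed from an account name and key, as done further below for
# generate_share_sas; all values are placeholders):
#
#     from datetime import datetime, timedelta
#     sas = FileSharedAccessSignature('mystorageaccount', '<account-key>')
#     token = sas.generate_share(
#         share_name='myshare',
#         permission='rl',  # an ordered subset of rcwdl
#         expiry=datetime.utcnow() + timedelta(hours=1))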
class _FileSharedAccessHelper(_SharedAccessHelper):
def add_resource_signature(self, account_name, account_key, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/file/' + account_name + path + '\n'
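        # e.g. account 'myaccount' with path 'myshare/dir/file.txt' canonicalizes
        # to '/file/myaccount/myshare/dir/file.txt\n' (illustrative values)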
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(QueryStringConstants.SIGNED_START) +
get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(QueryStringConstants.SIGNED_IP) +
get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
sign_string(account_key, string_to_sign))
def generate_account_sas(
account_name, # type: str
account_key, # type: str
resource_types, # type: Union[ResourceTypes, str]
permission, # type: Union[AccountSasPermissions, str]
expiry, # type: Optional[Union[datetime, str]]
start=None, # type: Optional[Union[datetime, str]]
ip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> str
"""Generates a shared access signature for the file service.
Use the returned signature with the credential parameter of any ShareServiceClient,
ShareClient, ShareDirectoryClient, or ShareFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str account_key:
The account key, also called shared key or access key, to generate the shared access signature.
:param ~azure.storage.fileshare.ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param ~azure.storage.fileshare.AccountSasPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str protocol:
        Specifies the protocol permitted for a request made with the SAS token. The default value is https.
:return: A Shared Access Signature (sas) token.
:rtype: str
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_authentication.py
:start-after: [START generate_sas_token]
:end-before: [END generate_sas_token]
:language: python
:dedent: 8
:caption: Generate a sas token.
"""
sas = SharedAccessSignature(account_name, account_key)
return sas.generate_account(
services=Services(fileshare=True),
resource_types=resource_types,
permission=permission,
expiry=expiry,
start=start,
ip=ip,
**kwargs
) # type: ignore
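# A minimal usage sketch (assumption: this module is azure.storage.fileshare,
# so ResourceTypes and AccountSasPermissions are importable alongside it; the
# account name and key are placeholders):
#
#     from datetime import datetime, timedelta
#     from azure.storage.fileshare import ResourceTypes, AccountSasPermissions
#
#     token = generate_account_sas(
#         account_name='mystorageaccount',
#         account_key='<account-key>',
#         resource_types=ResourceTypes(service=True),
#         permission=AccountSasPermissions(read=True),
#         expiry=datetime.utcnow() + timedelta(hours=1))
#     # pass token as the credential of a ShareServiceClient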
def generate_share_sas(
account_name, # type: str
share_name, # type: str
account_key, # type: str
permission=None, # type: Optional[Union[ShareSasPermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
start=None, # type: Optional[Union[datetime, str]]
policy_id=None, # type: Optional[str]
ip=None, # type: Optional[str]
**kwargs # type: Any
): # type: (...) -> str
"""Generates a shared access signature for a share.
Use the returned signature with the credential parameter of any ShareServiceClient,
ShareClient, ShareDirectoryClient, or ShareFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str share_name:
The name of the share.
:param str account_key:
The account key, also called shared key or access key, to generate the shared access signature.
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered rcwdl.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ShareSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
:param str policy_id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
:func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:keyword str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:keyword str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:keyword str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:keyword str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:keyword str protocol:
        Specifies the protocol permitted for a request made with the SAS token. The default value is https.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
sas = FileSharedAccessSignature(account_name, account_key)
return sas.generate_share(
share_name=share_name,
permission=permission,
expiry=expiry,
start=start,
policy_id=policy_id,
ip=ip,
**kwargs
)
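# A minimal usage sketch (placeholder values; ShareSasPermissions as documented
# above):
#
#     from datetime import datetime, timedelta
#     from azure.storage.fileshare import ShareSasPermissions
#
#     token = generate_share_sas(
#         account_name='mystorageaccount',
#         share_name='myshare',
#         account_key='<account-key>',
#         permission=ShareSasPermissions(read=True, list=True),
#         expiry=datetime.utcnow() + timedelta(hours=1))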
def generate_file_sas(
account_name, # type: str
share_name, # type: str
file_path, # type: List[str]
account_key, # type: str
permission=None, # type: Optional[Union[FileSasPermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
start=None, # type: Optional[Union[datetime, str]]
policy_id=None, # type: Optional[str]
ip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> str
"""Generates a shared access signature for a file.
Use the returned signature with the credential parameter of any ShareServiceClient,
ShareClient, ShareDirectoryClient, or ShareFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str share_name:
The name of the share.
:param file_path:
The file path represented as a list of path segments, including the file name.
:type file_path: List[str]
:param str account_key:
The account key, also called shared key or access key, to generate the shared access signature.
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered rcwd.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or FileSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
:param str policy_id:
A unique value up to 64 characters in length that correlates to a
stored access policy.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:keyword str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:keyword str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:keyword str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:keyword str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:keyword str protocol:
        Specifies the protocol permitted for a request made with the SAS token. The default value is https.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
sas = FileSharedAccessSignature(account_name, account_key)
if len(file_path) > 1:
dir_path = '/'.join(file_path[:-1])
else:
dir_path = None # type: ignore
return sas.generate_file( # type: ignore
share_name=share_name,
directory_name=dir_path,
file_name=file_path[-1],
permission=permission,
expiry=expiry,
start=start,
policy_id=policy_id,
ip=ip,
**kwargs
)
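# A minimal usage sketch (placeholder values). Note that file_path is a list of
# path segments, so nested files need no manual joining:
#
#     from datetime import datetime, timedelta
#     from azure.storage.fileshare import FileSasPermissions
#
#     token = generate_file_sas(
#         account_name='mystorageaccount',
#         share_name='myshare',
#         file_path=['mydir', 'myfile.txt'],  # directory 'mydir', file 'myfile.txt'
#         account_key='<account-key>',
#         permission=FileSasPermissions(read=True),
#         expiry=datetime.utcnow() + timedelta(hours=1))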
|
|
import sys
if sys.version_info < (3, 0):
import urllib2
urlopen = urllib2.urlopen
HTTPError = urllib2.HTTPError
else:
import urllib.request
import urllib.error
urlopen = urllib.request.urlopen
HTTPError = urllib.error.HTTPError
try:
import json
except ImportError:
import simplejson as json
import hashlib
from .proxies import TrackProxy
from . import util
import time
# Seconds to wait for asynchronous track/upload or track/analyze jobs to complete.
DEFAULT_ASYNC_TIMEOUT = 60
class Track(TrackProxy):
"""
Represents an audio file and its analysis from The Echo Nest.
All public methods in this module return Track objects.
Depending on the information available, a Track may have some or all of the
following attributes:
acousticness float: confidence the track is "acoustic" (0.0 to 1.0)
analysis_url URL to retrieve the complete audio analysis (time expiring)
analyzer_version str: e.g. '3.01a'
artist str or None: artist name
artist_id Echo Nest ID of artist, if known
danceability float: relative danceability (0.0 to 1.0)
duration float: length of track in seconds
energy float: relative energy (0.0 to 1.0)
id str: Echo Nest Track ID, e.g. 'TRTOBXJ1296BCDA33B'
        key int: between 0 (key of C) and 11 (key of B) inclusive
liveness float: confidence the track is "live" (0.0 to 1.0)
loudness float: overall loudness in decibels (dB)
md5 str: 32-character checksum of the original audio file, if available
mode int: 0 (major) or 1 (minor)
song_id The Echo Nest song ID for the track, if known
speechiness float: likelihood the track contains speech (0.0 to 1.0)
status str: analysis status, e.g. 'complete'
tempo float: overall BPM (beats per minute)
time_signature beats per measure (e.g. 3, 4, 5, 7)
title str or None: song title
        valence float: musical positiveness of the track, from negative (0.0) to positive (1.0)
The following attributes are available only after calling Track.get_analysis():
analysis_channels int: the number of audio channels used during analysis
analysis_sample_rate int: the sample rate used during analysis
bars list of dicts: timing of each measure
beats list of dicts: timing of each beat
codestring ENMFP code string
code_version version of ENMFP code generator
decoder audio decoder used by the analysis (e.g. ffmpeg)
echoprintstring fingerprint string using Echoprint (http://echoprint.me)
echoprint_version version of Echoprint code generator
        end_of_fade_in float: time in seconds where the fade-in ends
key_confidence float: confidence that key detection was accurate
meta dict: other track metainfo (bitrate, album, genre, etc.)
mode_confidence float: confidence that mode detection was accurate
num_samples int: total samples in the decoded track
offset_seconds unused, always 0
sample_md5 str: 32-character checksum of the decoded audio file
samplerate the audio sample rate detected in the file
sections list of dicts: larger sections of song (chorus, bridge, solo, etc.)
segments list of dicts: timing, pitch, loudness and timbre for each segment
start_of_fade_out float: time in seconds where fade out begins
synchstring string providing synchronization points throughout the track
synch_version version of the synch string algorithm
tatums list of dicts: the smallest metrical unit (subdivision of a beat)
tempo_confidence float: confidence that tempo detection was accurate
time_signature_confidence float: confidence that time_signature detection was accurate
Each bar, beat, section, segment and tatum has a start time, a duration, and a confidence,
in addition to whatever other data is given.
Examples:
>>> t = track.track_from_id('TRJSEBQ1390EC0B548')
>>> t
<track - Dark Therapy>
>>> t = track.track_from_md5('96fa0180d225f14e9f8cbfffbf5eb81d')
>>> t
<track - Spoonful - Live At Winterland>
>>>
>>> t = track.track_from_filename('Piano Man.mp3')
>>> t.meta
AttributeError: 'Track' object has no attribute 'meta'
>>> t.get_analysis()
>>> t.meta
{u'album': u'Piano Man',
u'analysis_time': 8.9029500000000006,
u'analyzer_version': u'3.1.3',
u'artist': u'Billy Joel',
u'bitrate': 160,
u'detailed_status': u'OK',
u'filename': u'/tmp/tmphrBQL9/fd2b524958548e7ecbaf758fb675fab1.mp3',
u'genre': u'Soft Rock',
u'sample_rate': 44100,
u'seconds': 339,
u'status_code': 0,
u'timestamp': 1369400122,
u'title': u'Piano Man'}
>>>
"""
def __repr__(self):
try:
return "<%s - %s>" % (self._object_type.encode('utf-8'), self.title.encode('utf-8'))
except AttributeError:
# the title is None
return "< Track >"
def __str__(self):
return self.title.encode('utf-8')
def get_analysis(self):
""" Retrieve the detailed analysis for the track, if available.
Raises Exception if unable to create the detailed analysis. """
if self.analysis_url:
try:
# Try the existing analysis_url first. This expires shortly
# after creation.
try:
json_string = urlopen(self.analysis_url).read()
except HTTPError:
# Probably the analysis_url link has expired. Refresh it.
param_dict = dict(id = self.id)
new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT)
if new_track and new_track.analysis_url:
self.analysis_url = new_track.analysis_url
json_string = urlopen(self.analysis_url).read()
else:
raise Exception("Failed to create track analysis.")
analysis = json.loads(json_string.decode('utf-8'))
analysis_track = analysis.pop('track', {})
self.__dict__.update(analysis)
self.__dict__.update(analysis_track)
except Exception: #pylint: disable=W0702
# No detailed analysis found.
raise Exception("Failed to create track analysis.")
else:
raise Exception("Failed to create track analysis.")
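# A minimal usage sketch (assumption: pyechonest is configured with an Echo
# Nest API key, e.g. via the ECHO_NEST_API_KEY environment variable):
#
#     t = track_from_id('TRTOBXJ1296BCDA33B')  # ID borrowed from the docstring
#     t.get_analysis()                         # adds bars, beats, segments, ...
#     print(t.tempo)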
def _wait_for_pending_track(trid, timeout):
status = 'pending'
param_dict = {'id': trid}
param_dict['format'] = 'json'
param_dict['bucket'] = 'audio_summary'
start_time = time.time()
end_time = start_time + timeout
# counter for seconds to wait before checking track profile again.
timeout_counter = 3
while status == 'pending' and time.time() < end_time:
time.sleep(timeout_counter)
result = util.callm('track/profile', param_dict)
status = result['response']['track']['status'].lower()
# Slowly increment to wait longer each time.
timeout_counter += timeout_counter / 2
return result
def _track_from_response(result, timeout):
"""
This is the function that actually creates the track object
"""
response = result['response']
status = response['track']['status'].lower()
if status == 'pending':
# Need to wait for async upload or analyze call to finish.
result = _wait_for_pending_track(response['track']['id'], timeout)
response = result['response']
status = response['track']['status'].lower()
if not status == 'complete':
track_id = response['track']['id']
if status == 'pending':
raise Exception('%s: the operation didn\'t complete before the timeout (%d secs)' %
(track_id, timeout))
else:
raise Exception('%s: there was an error analyzing the track, status: %s' % (track_id, status))
else:
# track_properties starts as the response dictionary.
track_properties = response['track']
# 'id' and 'md5' are separated to construct the Track object.
identifier = track_properties.pop('id')
md5 = track_properties.pop('md5', None) # tracks from song api calls will not have an md5
# Pop off the audio_summary dict and make those keys attributes
# of the Track. This includes things like tempo, energy, and loudness.
track_properties.update(track_properties.pop('audio_summary'))
return Track(identifier, md5, track_properties)
def _upload(param_dict, timeout, data):
"""
Calls upload either with a local audio file,
or a url. Returns a track object.
"""
param_dict['format'] = 'json'
param_dict['wait'] = 'true'
param_dict['bucket'] = 'audio_summary'
result = util.callm('track/upload', param_dict, POST = True, socket_timeout = 300, data = data)
return _track_from_response(result, timeout)
def _profile(param_dict, timeout):
param_dict['format'] = 'json'
param_dict['bucket'] = 'audio_summary'
result = util.callm('track/profile', param_dict)
return _track_from_response(result, timeout)
""" Below are convenience functions for creating Track objects, you should use them """
def _track_from_data(audio_data, filetype, timeout):
param_dict = {}
param_dict['filetype'] = filetype
return _upload(param_dict, timeout, audio_data)
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
"""
Create a track object from a file-like object.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
file_object: a file-like Python object
filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au
force_upload: skip the MD5 shortcut path, force an upload+analysis
Example:
>>> f = open("Miaow-01-Tempered-song.mp3")
>>> t = track.track_from_file(f, 'mp3')
>>> t
< Track >
>>>
"""
if not force_upload:
try:
# Check if this file has already been uploaded.
# This is much faster than uploading.
md5 = hashlib.md5(file_object.read()).hexdigest()
return track_from_md5(md5)
except util.EchoNestAPIError:
# Fall through to do a fresh upload.
pass
file_object.seek(0)
return _track_from_data(file_object.read(), filetype, timeout)
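# A minimal sketch of the MD5 fast path above (illustrative file name):
#
#     with open('song.mp3', 'rb') as f:
#         t = track_from_file(f, 'mp3')                     # MD5 lookup first
#     with open('song.mp3', 'rb') as f:
#         t = track_from_file(f, 'mp3', force_upload=True)  # always re-upload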
def track_from_filename(filename, filetype = None, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
"""
Create a track object from a filename.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
filename: A string containing the path to the input file.
filetype: A string indicating the filetype; Defaults to None (type determined by file extension).
force_upload: skip the MD5 shortcut path, force an upload+analysis
Example:
>>> t = track.track_from_filename("Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>>
"""
filetype = filetype or filename.split('.')[-1]
file_object = open(filename, 'rb')
result = track_from_file(file_object, filetype, timeout, force_upload)
file_object.close()
return result
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
"""
Create a track object from a public http URL.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
Example:
>>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>>
"""
param_dict = dict(url = url)
return _upload(param_dict, timeout, data=None)
def track_from_id(identifier, timeout=DEFAULT_ASYNC_TIMEOUT):
"""
Create a track object from an Echo Nest track ID.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
identifier: A string containing the ID of a previously analyzed track.
Example:
>>> t = track.track_from_id("TRWFIDS128F92CC4CA")
>>> t
<track - Let The Spirit>
>>>
"""
param_dict = dict(id = identifier)
return _profile(param_dict, timeout)
def track_from_md5(md5, timeout=DEFAULT_ASYNC_TIMEOUT):
"""
Create a track object from an md5 hash.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
md5: A string 32 characters long giving the md5 checksum of a track already analyzed.
Example:
>>> t = track.track_from_md5('b8abf85746ab3416adabca63141d8c2d')
>>> t
<track - Neverwas Restored (from Neverwas Soundtrack)>
>>>
"""
param_dict = dict(md5 = md5)
return _profile(param_dict, timeout)
|
|
import acm
import ael
import FHTI_EDD_OTC_Util
import HTI_ExcelReport2
import HTI_Util
import HTI_FeedTrade_EDD_Util
from shutil import copyfile
import os
ttSaveToFile = "Check this to save the report instead of showing it."
ttCSV = "Check this to export the report in CSV format."
ttFileName = "File name and path of the report. YYYYMMDD in the file name will be replaced by the valuation date."
ttSendMail = "Send report as email attachment."
Counterparty = 0
Contract_Date = 1
Contract_No = 2
TRS = 3
Security_Name = 4
BBG_Code = 5
Currency1 = 6
Quantity = 7
Avg_Exe_Price = 8
Exe_Notional = 9
Init_Price = 10
Settle_Ccy = 11
Settle_Amount = 12
IA_Ccy = 13
Ini_Cash_Bal = 14
Avail_Cash_Bal = 15
Loan_Curr = 16
Loan_Amount = 17
Currency2 = 18
Closing_Price = 19
MV_CURR = 20
FX_Currency = 21
Today_FX = 22
MV_HKD = 23
All_Time_LTV = 24
Coll_Trigger_LTV = 25
Terminate_LTV = 26
MTM_LVT = 27
Margin_Pool = 28
MTM_Exposure = 29
Est_All_Time_LTV = 30
Est_Coll_Trigger_LTV = 31
Est_Terminate_LTV = 32
Income_Client_to_Pay = 33
EDD_New_Income = 34
PWM_New_Income = 35
'''
def report_compare(x, y):
if x[Counterparty] > y[Counterparty]:
return 1
elif x[Counterparty] < y[Counterparty]:
return -1
else:
if x[Contract_No] > y[Contract_No]:
return 1
elif x[Contract_No] < y[Contract_No]:
return -1
else:
return 0
'''
def report_compare(x, y):
if x[Contract_Date] == '':
return -1
if y[Contract_Date] == '':
return 1
if ael.date(x[Contract_Date]).to_string('%Y%m%d') > ael.date(y[Contract_Date]).to_string('%Y%m%d'):
return 1
elif ael.date(x[Contract_Date]).to_string('%Y%m%d') < ael.date(y[Contract_Date]).to_string('%Y%m%d'):
return -1
if x[Contract_No] > y[Contract_No]:
return 1
elif x[Contract_No] < y[Contract_No]:
return -1
if x[Counterparty] > y[Counterparty]:
return 1
elif x[Counterparty] < y[Counterparty]:
return -1
if x[Currency1] > y[Currency1]:
return 1
elif x[Currency1] < y[Currency1]:
return -1
if x[Quantity] < y[Quantity]:
return 1
elif x[Quantity] > y[Quantity]:
return -1
return 0
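# Note: report_compare is a Python 2 cmp-style comparator (returning -1/0/1);
# it is passed directly to list.sort() at the bottom of this script, ordering
# rows by contract date, then contract no, counterparty, currency, and
# quantity (descending).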
def disable_variables(variables, enable = 0):
for i in variables:
for j in ael_variables:
if i == j[0]:
j[9] = enable
def cb(index, fieldValues):
global ael_variables
if ael_variables[index][0] == 'saveToFile':
disable_variables(('fileName',), fieldValues[index])
return fieldValues
def cb2(index, fieldValues):
global ael_variables
if ael_variables[index][0] == 'sendEmail':
disable_variables(('emaillist',), fieldValues[index])
disable_variables(('subject',), fieldValues[index])
return fieldValues
def cb3(index, fieldValues):
global ael_variables
if ael_variables[index][0] == 'saveToNetwork':
disable_variables(('networkDriveLocation',), fieldValues[index])
return fieldValues
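# Each ael_variables row appears to follow the Front Arena layout (an
# assumption based on how the fields are used here): [name, display label,
# type, candidate values, default, mandatory, multi-select, tooltip, callback,
# enabled]; the callbacks above flip the trailing 'enabled' flag (index 9) of
# their dependent fields.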
ael_variables = [['asofdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
['saveToFile', 'Save to file', 'int', [1, 0], 0, 0, 0, ttSaveToFile, cb, None], \
['fileName', 'File name', 'string', None, 'c:\\temp\\CollateralMgt_YYYYMMDD', 0, 0, ttFileName, None, 0], \
['sendEmail', 'Send mail', 'int', [1, 0], 0, 0, 0, ttSendMail, cb2, None], \
['emaillist', 'Email', 'string', None, 'louis.ck.wong@htisec.com', 0, 0, 'Email List', None, 0], \
['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), None, 1, 1, 'Acquirer(s)', None, 1], \
['counterparties', 'Counterparty(s)', 'string', HTI_Util.getAllParties(), None, 0, 1, 'Counterparty(s)', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), None, 1, 1, 'Portfolio', None, 1], \
['subject', 'Email subject', 'string', None, 'FA4 (PROD) EDD Collateral Management Report (TRS) asof @date', 1, 0, 'Email Subject', None, 1], \
['saveToCSV', 'CSV format', 'int', [1, 0], 0, 0, 0, ttCSV, None, None], \
['title', 'Report title', 'string', None, 'Collateral Management Report (TRS) asof @date', 1, 0, 'Report Title', None, 1],
['currclspricemkt', 'Current Closing Price Market', 'string', None, '', 1, 0, 'Current Closing Price Market', None, 1],
['histclspricemkt', 'Historical Closing Price Market', 'string', None, '', 1, 0, 'Historical Closing Price Market', None, 1],
['base_ccy', 'Base Ccy', 'string', None, '', 1, 0, 'Base Ccy', None, 1], \
['groupbypty', 'Group by Counterparty', 'int', [1, 0], 0, 0, 0, 'Group by Counterparty', None, None], \
                 ['fileperpty', 'Separate File for Counterparty', 'int', [1, 0], 0, 0, 0, 'Separate File for Counterparty', None, None], \
['saveToNetwork', 'Copy to Network', 'int', [1, 0], 0, 0, 0, 'Copy to Network Drive', cb3, None], \
['networkDriveLocation', 'Network Drive Location', 'string', None, 'C:\\temp\\PositionReport', 0, 0, 'Network Drive Location', None, 0]]
def getAvailableCash(external_ref, currclspricemkt, histclspricemkt, base_ccy, und_insaddr, asofdate):
avail_cash_bal = 0.0
acm_ins = getFirstTRS(external_ref, und_insaddr)
if acm_ins != None:
ins_ccy = acm_ins.Currency().Name()
report_curr = acm_ins.AdditionalInfo().Reporting_Currency()
if report_curr == None:
report_ccy = base_ccy
else:
report_ccy = report_curr.Name()
margin_inject_by_client = getMarginInjectedByClient(external_ref)
        all_time_LTV, trigger_LTV, terminate_LTV, contract_date, curr_insid, counterparty_ptyid = getLTVs(external_ref, asofdate, ins_ccy, report_ccy, und_insaddr)
sbl_rate = getSBLContractInfo(external_ref, asofdate)
loan_amount, loan_ccy, fx_vs_report_ccy, loan_int, edd_int, pwm_int, loan_trdnbr, fixed_amt = getLoanContractInfo(external_ref, currclspricemkt, histclspricemkt, asofdate, curr_insid, report_ccy, sbl_rate, asofdate)
if loan_amount != 0.0:
ini_cash_bal = loan_amount / fx_vs_report_ccy + margin_inject_by_client
else:
ini_cash_bal = margin_inject_by_client
exe_notional, shortportfolio_flag = getExeNotional(external_ref, und_insaddr, asofdate)
if shortportfolio_flag == True:
avail_cash_bal = 0.0
else:
shortportfolio_amt, shortportfolio_flag = getTotalTradeNominal(external_ref, True, und_insaddr, asofdate)
avail_cash_bal = ini_cash_bal - exe_notional + shortportfolio_amt
return avail_cash_bal
def getLTVs(external_ref, dt, fm_ccy, to_ccy, und_insaddr):
    all_time_LTV = 0.0
    trigger_LTV = 0.0
    terminate_LTV = 0.0
    # defaults in case the query returns no rows
    t_contract_date = ''
    curr_insid = None
    ptyid = ''
    #fx_vs_report_ccy = 1.0
strSql = """select t.trdnbr, pty.ptyid,
add_info(pty, 'Pty All Time LTV') 'p_all_time_LTV',
add_info(pty, 'Pty Trigger LTV') 'p_trigger_LTV',
add_info(pty, 'Pty Terminate LTV') 'p_terminate_LTV',
add_info(t, 'Trd All Time LTV') 't_all_time_LTV',
add_info(t, 'Trd Trigger LTV') 't_trigger_LTV',
add_info(t, 'Trd Terminate LTV') 't_terminate_LTV',
add_info(t, 'Trd Contract Date') 't_contract_date',
c.insid
from trade t, party pty, instrument i, leg l, instrument c
where t.counterparty_ptynbr = pty.ptynbr
and t.status not in ('Void', 'Simulated')
and t.trdnbr = t.trx_trdnbr
and i.insaddr = l.insaddr
and l.payleg = 'No' and l.curr = c.insaddr
and t.insaddr = i.insaddr and i.instype = 'TotalReturnSwap'
and add_info(t, 'External Reference') = '%s'
and l.float_rate = %s""" % (external_ref, str(und_insaddr))
print strSql
rs = ael.asql(strSql)
columns, buf = rs
for table in buf:
for row in table:
trdnbr = row[0]
ptyid = str(row[1])
p_all_time_LTV = row[2]
p_trigger_LTV = row[3]
p_terminate_LTV = row[4]
t_all_time_LTV = row[5]
t_trigger_LTV = row[6]
t_terminate_LTV = row[7]
t_contract_date = row[8]
curr_insid = row[9]
#t_fx_vs_report_ccy = row[8]
if t_all_time_LTV != '':
all_time_LTV = float(t_all_time_LTV) / 100.0
else:
if p_all_time_LTV != '':
all_time_LTV = float(p_all_time_LTV) / 100.0
if t_trigger_LTV != '':
trigger_LTV = float(t_trigger_LTV) / 100.0
else:
if p_trigger_LTV != '':
trigger_LTV = float(p_trigger_LTV) / 100.0
if t_terminate_LTV != '':
terminate_LTV = float(t_terminate_LTV) / 100.0
else:
if p_terminate_LTV != '':
terminate_LTV = float(p_terminate_LTV) / 100.0
print 'all_time_LTV=%s, trigger_LTV=%s, terminate_LTV=%s' % (all_time_LTV, trigger_LTV, terminate_LTV)
return all_time_LTV, trigger_LTV, terminate_LTV, t_contract_date, curr_insid, ptyid
def getContractDate(external_ref):
t_contract_date = ''
strSql = """select add_info(t, 'Trd Contract Date') 't_contract_date'
from trade t, instrument i
where t.status not in ('Void', 'Simulated')
and t.trdnbr = t.trx_trdnbr
and t.insaddr = i.insaddr and i.instype = 'TotalReturnSwap'
and add_info(t, 'External Reference') = '%s'
and add_info(t, 'Group Trade Ref') = t.trdnbr""" % (external_ref)
print strSql
rs = ael.asql(strSql)
columns, buf = rs
for table in buf:
for row in table:
t_contract_date = row[0]
return t_contract_date
def getMarginInjectedByClient(external_ref):
strSql = """select sum(p.amount) 'amount'
from instrument i, trade t, payment p
where i.insaddr = t.insaddr
and i.instype = 'Curr'
and t.status not in ('Void', 'Simulated')
and t.insaddr = t.curr
and t.trdnbr = p.trdnbr
and add_info(t, 'External Reference') = '%s'""" % (external_ref)
#print strSql
rs = ael.asql(strSql)
columns, buf = rs
amount = 0.0
for table in buf:
for row in table:
amount = amount + float(row[0])
return abs(amount)
def getSBLContractInfo(external_ref, asofdate):
strSql = """select l.fixed_rate, t.trdnbr
from trade t, instrument i, leg l, instrument c
where i.insaddr = t.insaddr
and i.instype = 'Deposit'
and t.status not in ('Void', 'Simulated')
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr
and i.curr = c.insaddr""" % (external_ref)
print strSql
rs = ael.asql(strSql)
columns, buf = rs
sbl_rate = 0.0
for table in buf:
for row in table:
loan_trdnbr = int(row[1])
acm_trd = acm.FTrade[loan_trdnbr]
if acm_trd == None:
continue
            # only deposits flagged as SBL trades carry the SBL rate
            if acm_trd.AdditionalInfo().Trd_SBL() != True:
                continue
sbl_rate = float(row[0]) / 100.0
break
return sbl_rate
def getLoanContractInfo(external_ref, currclspricemkt, histclspricemkt, dt, fm_ccy, to_ccy, sbl_rate, asofdate):
strSql = """select t.premium 'amount', c.insid, add_info(t, 'Trd FX Rate vs HKD') 't_fx_vs_hkd', t.trdnbr, l.fixed_rate
from trade t, instrument i, leg l, instrument c
where i.insaddr = t.insaddr
and i.instype = 'Deposit'
and t.status not in ('Void', 'Simulated')
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr
and i.curr = c.insaddr
order by t.trdnbr""" % (external_ref)
print strSql
rs = ael.asql(strSql)
columns, buf = rs
amount = 0.0
loan_ccy = None
loan_int = 0.0
pwm_int = 0.0
edd_int = 0.0
fx_vs_report_ccy = None
hasDeposit = False
loan_trdnbr = None
pwm_int_adj = 0.0
bu_int_adj = 0.0
fixed_amt = 0.0
trdnbr = None
ael_asofdate = ael.date(asofdate)
for table in buf:
for row in table:
loan_trdnbr = int(row[3])
if trdnbr == None:
trdnbr = loan_trdnbr # the first loan contract
acm_trd = acm.FTrade[loan_trdnbr]
if acm_trd == None:
continue
if acm_trd.AdditionalInfo().Trd_SBL() != None:
if acm_trd.AdditionalInfo().Trd_SBL() == True:
continue
hasDeposit = True
loan_ccy = row[1]
if row[2] != '':
fx_vs_report_ccy = float(row[2])
fixed_rate = float(row[4])
            # an external reference can now map to more than one loan trade (20170511)
fixed_amt_trd = 0.0
edd_int_trd = 0.0
pwm_int_trd = 0.0
ael_trd = ael.Trade[loan_trdnbr]
if acm_trd != None:
for acm_leg in acm_trd.Instrument().Legs():
                print 'leg type', acm_leg.LegType()
if acm_leg.LegType() in ('Float', 'Fixed'):
                    print 'leg period', acm_leg.StartDate(), acm_leg.EndDate()
if ael.date(acm_leg.StartDate()) <= ael_asofdate and ael.date(acm_leg.EndDate()) >= ael_asofdate:
amount = amount + float(row[0])
acc_int = ael_trd.insaddr.interest_accrued(None, dt, ael_trd.curr.insid) * ael_trd.quantity
settle_int = ael_trd.interest_settled(None, dt, ael_trd.curr.insid)
loan_int_trd = acc_int + settle_int # loan interest for this trade
loan_int = loan_int + loan_int_trd #sumup all loan_int
if fixed_rate == 0.0:
edd_int_trd = (loan_int_trd * (1.00 - sbl_rate)) / 2.0
else:
edd_int_trd = (loan_int_trd / (fixed_rate / 100.0) * (fixed_rate / 100.0 - sbl_rate)) / 2.0
pwm_int_trd = edd_int_trd
for acm_cash_flow in acm_leg.CashFlows():
if acm_cash_flow.CashFlowType() == 'Fixed Amount' and acm_cash_flow.AdditionalInfo().Cash_Flow_Type() == 'Funding':
                                    fixed_amt_trd = -acm_cash_flow.FixedAmount() * acm_trd.Premium() + fixed_amt_trd
                            edd_fixed_amt_trd = fixed_amt_trd / 2.0
                            pwm_fixed_amt_trd = fixed_amt_trd - edd_fixed_amt_trd
                            # accumulate the funding cash flows into the returned total
                            fixed_amt = fixed_amt + fixed_amt_trd
                            edd_int_trd = edd_int_trd + edd_fixed_amt_trd
                            pwm_int_trd = pwm_int_trd + pwm_fixed_amt_trd
pwm_int = pwm_int + pwm_int_trd
edd_int = edd_int + edd_int_trd
acm_payments = acm.FPayment.Select("trade = %s and validFrom <= '%s'" % (acm_trd.Oid(), ael_asofdate.add_days(1)))
for acm_payment in acm_payments:
if acm_payment.Type() == 'PWM Int Adj':
pwm_int_adj = pwm_int_adj + acm_payment.Amount()
elif acm_payment.Type() == 'BU Int Adj':
bu_int_adj = bu_int_adj + acm_payment.Amount()
#break
if hasDeposit:
if fx_vs_report_ccy == None:
acm_trd = acm.FTrade[trdnbr]
dt = ael.date(acm_trd.TradeTime()[0:10])
fx_vs_report_ccy = getFx(dt, fm_ccy, to_ccy, currclspricemkt, histclspricemkt)
else:
amount = 0.0
loan_ccy = ""
fx_vs_report_ccy = 1.0
'''
fixed_amt = 0.0
if loan_trdnbr != None:
acm_loan_trd = acm.FTrade[loan_trdnbr]
if acm_loan_trd != None:
for acm_leg in acm_loan_trd.Instrument().Legs():
for acm_cash_flow in acm_leg.CashFlows():
if acm_cash_flow.CashFlowType() == 'Fixed Amount' and acm_cash_flow.AdditionalInfo().Cash_Flow_Type() == 'Funding':
fixed_amt = -acm_cash_flow.FixedAmount() * acm_loan_trd.Premium()
edd_fixed_amt = fixed_amt / 2.0
pwm_fixed_amt = fixed_amt - edd_fixed_amt
print 'edd_int1', edd_int
edd_int = edd_int + edd_fixed_amt
pwm_int = pwm_int + pwm_fixed_amt
print 'edd_int2', edd_int
acm_payments = acm.FPayment.Select("trade = %s and validFrom <= '%s'" % (acm_loan_trd.Oid(), ael.date(asofdate).add_days(1)))
for acm_payment in acm_payments:
if acm_payment.Type() == 'PWM Int Adj':
pwm_int_adj = pwm_int_adj + acm_payment.Amount()
elif acm_payment.Type() == 'BU Int Adj':
bu_int_adj = bu_int_adj + acm_payment.Amount()
'''
if pwm_int == None and pwm_int_adj == 0.0:
pwm_int = None
elif pwm_int == None and pwm_int_adj != 0.0:
pwm_int = pwm_int_adj
else:
pwm_int = pwm_int + pwm_int_adj
if edd_int == None and bu_int_adj == 0.0:
edd_int = None
elif edd_int == None and bu_int_adj != 0.0:
edd_int = bu_int_adj
else:
edd_int = edd_int + bu_int_adj
return abs(amount), loan_ccy, fx_vs_report_ccy, loan_int, edd_int, pwm_int, trdnbr, fixed_amt
def getTotalTradeQuantity(external_ref, und_insaddr, asofdate):
acm_ins = getFirstTRS(external_ref, und_insaddr)
if acm_ins == None:
return None
print "instrument='%s' and status <> 'Void' and status <> 'Simulated'" % acm_ins.Name()
acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <= '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trd = None
if acm_trds != None:
for acm_trd in acm_trds:
if acm_trd.TrxTrade() != None:
if acm_trd.Oid() == acm_trd.TrxTrade().Oid():
break
else:
return None
total_quantity = 0.0
if acm_trd.TrxTrade() == None:
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
return abs(total_quantity)
else:
return None
elif acm_trd.Oid() == acm_trd.TrxTrade().Oid():
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
# find all other trade
acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime <= '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
for acm_trs_trd in acm_trs_trds:
if acm_trs_trd.Oid() != acm_trs_trd.TrxTrade().Oid() and \
acm_trs_trd.Status() not in ('Void', 'Simulated') and \
acm_trs_trd.Instrument().InsType() == 'TotalReturnSwap':
total_quantity = total_quantity + acm_trs_trd.Quantity()
print total_quantity
'''
if total_quantity == 0.0:
return None
else:
return abs(total_quantity)
'''
return -total_quantity
else:
return -total_quantity
def getFirstTRS(external_ref, und_insaddr):
strSql = """select i.insid
from trade t, instrument i, leg l
where i.insaddr = t.insaddr
and i.instype = 'TotalReturnSwap'
and t.status not in ('Void', 'Simulated')
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr and l.payleg = 'No'
and l.float_rate = %s
and t.trdnbr = t.trx_trdnbr""" % (external_ref, str(und_insaddr))
print strSql
rs = ael.asql(strSql)
columns, buf = rs
insid = ''
for table in buf:
for row in table:
insid = str(row[0]).strip()
break
if insid == '':
return None
acm_ins = acm.FInstrument[insid]
return acm_ins
def getTotalTradeNominal(external_ref, shortportfolio, und_insaddr, asofdate):
acm_ins = getFirstTRS(external_ref, und_insaddr)
if acm_ins == None:
return None, None
print asofdate
acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <= '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trd = None
if acm_trds != None:
for acm_trd in acm_trds:
if acm_trd.TrxTrade() != None:
if acm_trd.Oid() == acm_trd.TrxTrade().Oid():
break
else:
return None, None
shortportfolio_flag = acm_trd.AdditionalInfo().Short_Portfolio()
if shortportfolio_flag != None:
if shortportfolio == True and shortportfolio_flag == True:
return 0.0, shortportfolio_flag
else:
if shortportfolio == True:
return 0.0, shortportfolio_flag
    shortportfolio_sign = 1.0
    if shortportfolio_flag == True and shortportfolio == False:
        shortportfolio_sign = -1.0
total_nominal = 0.0
if acm_trd.TrxTrade() == None:
if acm_trd.Status() not in ('Void', 'Simulated'):
total_nominal = total_nominal - acm_trd.Nominal() * shortportfolio_sign
#total_nominal = total_nominal + acm_trd.Premium() * shortportfolio_sign
#total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trd, asofdate)
total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trd, asofdate) * shortportfolio_sign
            return total_nominal, shortportfolio_flag
else:
return None, None
elif acm_trd.Oid() == acm_trd.TrxTrade().Oid():
if acm_trd.Status() not in ('Void', 'Simulated'):
total_nominal = total_nominal - acm_trd.Nominal() * shortportfolio_sign
#total_nominal = total_nominal + acm_trd.Premium() * shortportfolio_sign
#total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trd, asofdate)
total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trd, asofdate) * shortportfolio_sign
# find all other trade
acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime <= '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
for acm_trs_trd in acm_trs_trds:
if acm_trs_trd.Oid() != acm_trs_trd.TrxTrade().Oid() and \
acm_trs_trd.Status() not in ('Void', 'Simulated') and \
acm_trs_trd.Instrument().InsType() == 'TotalReturnSwap':
#print acm_trs_trd.Oid(), acm_trs_trd.Nominal(), acm_trs_trd.Quantity()
total_nominal = total_nominal - acm_trs_trd.Nominal() * shortportfolio_sign
#total_nominal = total_nominal + acm_trs_trd.Premium() * shortportfolio_sign
#total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trs_trd, asofdate)
total_nominal = total_nominal + FHTI_EDD_OTC_Util.getTradePaymentAmountForTRSInitPrice(acm_trs_trd, asofdate) * shortportfolio_sign
return total_nominal * shortportfolio_sign, shortportfolio_flag
return total_nominal, shortportfolio_flag
def getExeNotional(external_ref, und_insaddr, asofdate):
return getTotalTradeNominal(external_ref, False, und_insaddr, asofdate)
def getFx(dt, fm_ccy, to_ccy, currclspricemkt, histclspricemkt):
if fm_ccy == 'CNY':
fm_ccy = 'CNH'
if to_ccy == 'CNY':
to_ccy = 'CNH'
ins_fm_ccy = ael.Instrument[fm_ccy]
ins_to_ccy = ael.Instrument[to_ccy]
ins_usd = ael.Instrument['USD']
try:
if dt == ael.date_today():
#fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Last', 0, currclspricemkt)
fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
fx_rate = fm_usd_rate * to_usd_rate
else:
#fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
fm_usd_rate = ins_fm_ccy.used_price(dt, ins_usd.insid, 'Close', 0, histclspricemkt)
to_usd_rate = ins_usd.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
fx_rate = fm_usd_rate * to_usd_rate
except:
#fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
#to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
#fx_rate = fm_usd_rate * to_usd_rate
fx_rate = 0.0
#fx_rate = ins_fm_ccy.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
return fx_rate
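# Worked sketch of the USD cross above (illustrative quotes only): converting
# HKD to JPY with fm_usd_rate = 0.128 (USD per HKD) and to_usd_rate = 110.0
# (JPY per USD) gives fx_rate = 0.128 * 110.0 = 14.08 JPY per HKD.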
def getUnderlyingPrice(dt, ael_und_ins, currclspricemkt, histclspricemkt):
try:
if dt == ael.date_today():
cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
else:
cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Close', 0, histclspricemkt)
except:
#cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
cls_price = 0.0
return cls_price
def getMarginPool():
return 0.0
def ael_main(dict):
asofdate = dict['asofdate']
if asofdate == 'Today':
asofdate = ael.date_today()
asofdate = ael.date(asofdate)
title = dict['title'].replace('@date', str(asofdate))
subject = dict['subject'].replace('@date', str(asofdate))
saveToFile = dict['saveToFile']
saveToCSV = dict['saveToCSV']
fileName = dict['fileName']
sendEmail = dict['sendEmail']
emailList = dict['emaillist']
recipients = emailList.split(',')
fileName = fileName.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
# Portfolios
portfolios = dict['portfolio']
portfolioList2 = []
pf_list = ''
portfolioList2.extend(portfolios)
for port in portfolioList2:
prfid = port
pfarr = []
pPf = ael.Portfolio[prfid]
HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
if len(pfarr) > 0:
for pf in pfarr:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + pf + "'"
else:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + prfid + "'"
# Acquirers
acq_array_list = dict['acquirers']
acq_list = ''
for acq in acq_array_list:
if acq_list == '':
acq_list = "'" + acq + "'"
else:
acq_list = acq_list + ",'" + acq + "'"
# Counterparties
pty_array_list = dict['counterparties']
pty_list = ''
for pty in pty_array_list:
if pty_list == '':
pty_list = "'" + pty + "'"
else:
pty_list = pty_list + ",'" + pty + "'"
currclspricemkt = dict['currclspricemkt']
histclspricemkt = dict['histclspricemkt']
base_ccy = dict['base_ccy']
groupbypty = dict['groupbypty']
fileperpty = dict['fileperpty']
saveToNetwork = dict['saveToNetwork']
networkDriveLocation = dict['networkDriveLocation']
if not fileperpty:
genCollateralMgtRpt(asofdate, pf_list, acq_list, pty_list, base_ccy, currclspricemkt, histclspricemkt, title, subject, saveToFile, saveToCSV, sendEmail, emailList, fileName, groupbypty, fileperpty)
else:
strSql = """select t.trdnbr, add_info(t, 'External Reference') 'external_ref', l.float_rate, c.ptyid
into externalRef
from instrument i, trade t, party a, portfolio pf, leg l, party c
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and i.instype = 'TotalReturnSwap'
and t.acquirer_ptynbr = a.ptynbr
    and a.ptyid in (@acquirer_list)
and pf.prfid in (@portfolio_list)
and t.time < '%s'
and i.insaddr = l.insaddr and l.payleg = 'No'
and t.counterparty_ptynbr = c.ptynbr
@counterparty_list_sql
select distinct ptyid
from externalRef
where external_ref ~= ''""" % (asofdate.add_days(1))
strSql = strSql.replace('@portfolio_list', pf_list)
        strSql = strSql.replace('@acquirer_list', acq_list)
if pty_list != '':
counterparty_list_sql = 'and c.ptyid in (@counterparty_list)'
counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
else:
strSql = strSql.replace("@counterparty_list_sql", ' ')
print strSql
rs = ael.asql(strSql)
columns, buf = rs
rptContent = []
for table in buf:
for row in table:
ptyid = row[0]
print 'ptyid', ptyid
ptyfileName = fileName.replace('[ptyid]', ptyid)
print 'ptyfileName', ptyfileName
dir_path = os.path.dirname(os.path.realpath(ptyfileName))
if os.path.exists(dir_path) == False:
os.mkdir(dir_path)
exact_ptyfileName = os.path.basename(ptyfileName)
print 'exact_ptyfileName',exact_ptyfileName
genCollateralMgtRpt(asofdate, pf_list, acq_list, "'"+ptyid+"'", base_ccy, currclspricemkt, histclspricemkt, title, subject, saveToFile, saveToCSV, sendEmail, emailList, ptyfileName, groupbypty, fileperpty)
destination_fileName = networkDriveLocation + '\\' + exact_ptyfileName
destination_fileName = destination_fileName.replace('[ptyid]', ptyid)
dir_path = os.path.dirname(os.path.realpath(destination_fileName))
if os.path.exists(dir_path) == False:
os.mkdir(dir_path)
#print ptyfileName, destination_fileName
if saveToNetwork:
copyfile(ptyfileName+'.xlsx', destination_fileName+'.xlsx')
def genCollateralMgtRpt(asofdate, pf_list, acq_list, pty_list, base_ccy, currclspricemkt, histclspricemkt, title, subject, saveToFile, saveToCSV, sendEmail, emailList, fileName, groupbypty, fileperpty):
product_strategy = 'SP_Portfolio Swap' #default no grouping
strSql = """select t.trdnbr, add_info(t, 'External Reference') 'external_ref', l.float_rate
into externalRef
from instrument i, trade t, party a, portfolio pf, leg l, party c
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and i.instype = 'TotalReturnSwap'
and t.acquirer_ptynbr = a.ptynbr
    and a.ptyid in (@acquirer_list)
and pf.prfid in (@portfolio_list)
and t.time < '%s'
and i.insaddr = l.insaddr and l.payleg = 'No'
and t.counterparty_ptynbr = c.ptynbr
@counterparty_list_sql
/*and c.ptyid = 'EDD SPRUCE LIGHT ABSOL RET F'*/
select distinct external_ref, float_rate
from externalRef
where external_ref ~= ''""" % (asofdate.add_days(1))
strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@acquirer_list', acq_list)
if pty_list != '':
counterparty_list_sql = 'and c.ptyid in (@counterparty_list)'
counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
else:
strSql = strSql.replace("@counterparty_list_sql", ' ')
print strSql
rs = ael.asql(strSql)
columns, buf = rs
rptContent = []
for table in buf:
for row in table:
rptRow = []
external_ref = str(row[0])
und_insaddr = row[1]
#if external_ref != 'LTRS_PRY_2016030901' and external_ref != 'DPS_2016053101':
# continue
print 'external_ref', external_ref
acm_ins = getFirstTRS(external_ref, und_insaddr)
#print 'acm_ins', acm_ins.Name()
if acm_ins != None:
ins_ccy = acm_ins.Currency().Name()
report_curr = acm_ins.AdditionalInfo().Reporting_Currency()
if report_curr == None:
report_ccy = base_ccy
else:
report_ccy = report_curr.Name()
print 'report_ccy', report_ccy
all_time_LTV, trigger_LTV, terminate_LTV, contract_date, curr_insid, counterparty_ptyid = getLTVs(external_ref, asofdate, ins_ccy, report_ccy, und_insaddr)
contract_date = getContractDate(external_ref)
#fx_vs_hkd = 5.8153
print 'curr_insid', curr_insid
margin_inject_by_client = getMarginInjectedByClient(external_ref)
print 'margin_inject_by_client=%s' % margin_inject_by_client
sbl_rate = getSBLContractInfo(external_ref, asofdate)
loan_amount, loan_ccy, fx_vs_report_ccy, loan_int, edd_int, pwm_int, loan_trdnbr, fixed_amt = getLoanContractInfo(external_ref, currclspricemkt, histclspricemkt, asofdate, curr_insid, report_ccy, sbl_rate, asofdate)
print 'fx_vs_report_ccy=%s' % fx_vs_report_ccy
print 'loan_amount', loan_amount
print 'loan_int', loan_int
print 'edd_int', edd_int
print 'pwm_int', pwm_int
print 'loan_trdnbr', loan_trdnbr
print 'fixed_amt', fixed_amt
if loan_int == None:
client_to_pay = None
else:
client_to_pay = loan_int + fixed_amt
print 'contract_date', contract_date
if loan_amount != 0.0:
ini_cash_bal = loan_amount / fx_vs_report_ccy + margin_inject_by_client
else:
ini_cash_bal = margin_inject_by_client
print 'ini_cash_bal=%s' % ini_cash_bal
exe_notional, shortportfolio_flag = getExeNotional(external_ref, und_insaddr, asofdate)
print 'exe_notional', exe_notional
if shortportfolio_flag == True:
avail_cash_bal = 0.0
else:
shortportfolio_amt, shortportfolio_flag = getTotalTradeNominal(external_ref, True, und_insaddr, asofdate)
avail_cash_bal = ini_cash_bal - exe_notional + shortportfolio_amt
                # Temporary override requested by Ben Teng on 20170321
if loan_amount == 0.0:
avail_cash_bal = 0.0
print 'avail_cash_bal', avail_cash_bal
today_fx = getFx(asofdate, ins_ccy, report_ccy, currclspricemkt, histclspricemkt)
#today_fx = 5.7098
print 'today_fx', today_fx
margin_pool = getMarginPool()
qty = getTotalTradeQuantity(external_ref, und_insaddr, asofdate)
                if qty == None or round(qty, 2) == 0.0:
                    # suppress positions that have been fully closed out
                    continue
print 'qty', qty
acm_und_ins = getTRSUnderlying(acm_ins)
today_underlying_price = getUnderlyingPrice(asofdate, ael.Instrument[acm_und_ins.Name()], currclspricemkt, histclspricemkt)
print 'today_underlying_price', today_underlying_price
init_price = FHTI_EDD_OTC_Util.initPrice_TRS(acm_ins, asofdate)
product_strategy = productStrategy_TRS(acm_ins, asofdate)
if shortportfolio_flag != None:
if shortportfolio_flag == True and product_strategy == 'SP_Portfolio Swap':
original_mv = -(init_price - today_underlying_price) * qty
else:
original_mv = today_underlying_price * qty
else:
original_mv = today_underlying_price * qty
#original_mv = today_underlying_price * qty
print 'original_mv', original_mv
if qty == 0.0:
mtm_lvt = 0.0
else:
if (((original_mv+avail_cash_bal)*today_fx)+margin_pool) == 0.0:
mtm_lvt = 0.0
else:
if loan_amount == 0.0:
if today_underlying_price != 0.0:
mtm_lvt = init_price / today_underlying_price
else:
mtm_lvt = 0.0
else:
mtm_lvt = loan_amount/(((original_mv+avail_cash_bal)*today_fx)+margin_pool)
print 'mtm_lvt=%s'%mtm_lvt
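                    # Worked sketch (illustrative figures): loan_amount 6,000,000
                    # against original_mv 9,000,000 + avail_cash_bal 1,000,000 at
                    # today_fx 1.0 and margin_pool 0 gives
                    # mtm_lvt = 6,000,000 / 10,000,000 = 60%.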
mv_in_report_ccy = original_mv * today_fx
if loan_amount == 0.0:
mtm_exposure = mv_in_report_ccy - (exe_notional * today_fx)
mtm_lvt = 0.0
else:
mtm_exposure = loan_amount-(((original_mv+avail_cash_bal)*today_fx)*all_time_LTV)-margin_pool
# added by Ben Teng on 20170328
if shortportfolio_flag != None:
if shortportfolio_flag == True:
mtm_exposure = (today_underlying_price * qty) - exe_notional
if product_strategy == 'SP_TRS':
if today_underlying_price != 0.0:
mtm_lvt = init_price / today_underlying_price
else:
mtm_lvt = 0.0
print 'mtm_exposure=%s'%mtm_exposure
if all_time_LTV != 0.0 and today_fx != 0.0 and qty != 0.0:
est_all_time_lvt = str(((loan_amount/all_time_LTV-margin_pool)/today_fx-avail_cash_bal)/qty)
else:
est_all_time_lvt = ''
if est_all_time_lvt != '':
if float(est_all_time_lvt) == 0.0:
est_all_time_lvt = ''
print 'est_all_time_lvt=%s'%est_all_time_lvt
if trigger_LTV != 0.0 and today_fx != 0.0 and qty != 0.0:
est_coll_trigger_lvt = str(((loan_amount/trigger_LTV-margin_pool)/today_fx-avail_cash_bal)/qty)
else:
est_coll_trigger_lvt = ''
if est_coll_trigger_lvt != '':
if float(est_coll_trigger_lvt) == 0.0:
est_coll_trigger_lvt = ''
print 'est_coll_trigger_lvt=%s'%est_coll_trigger_lvt
if terminate_LTV != 0.0 and today_fx != 0.0 and qty != 0.0:
est_terminate_trigger_lvt = str(((loan_amount/terminate_LTV-margin_pool)/today_fx-avail_cash_bal)/qty)
else:
est_terminate_trigger_lvt = ''
if est_terminate_trigger_lvt != '':
if float(est_terminate_trigger_lvt) == 0.0:
est_terminate_trigger_lvt = ''
print 'est_terminate_trigger_lvt=%s'%est_terminate_trigger_lvt
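                    # Worked sketch of the estimated price levels above (illustrative
                    # figures): loan_amount 6,000,000, an LTV of 0.75, margin_pool 0,
                    # today_fx 1.0, avail_cash_bal 1,000,000 and qty 100,000 gives
                    # ((6,000,000 / 0.75 - 0) / 1.0 - 1,000,000) / 100,000 = 70.0.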
underlying_bbg = getUndInstrumentBBTicker(acm_ins)
ins_name = ''
if acm_und_ins.AdditionalInfo().Ins_Description() != None:
ins_name = acm_und_ins.AdditionalInfo().Ins_Description()
#init_price = FHTI_EDD_OTC_Util.initPrice_TRS(acm_ins, asofdate)
avg_price = FHTI_EDD_OTC_Util.avgPrice_TRS(acm_ins, asofdate)
settlement_amt = exe_notional
# added by Ben Teng on 20170329
#print product_strategy, shortportfolio_flag, 'louisA', mv_in_report_ccy, original_mv
if shortportfolio_flag != None:
print product_strategy, shortportfolio_flag, 'louisB', original_mv, settlement_amt, today_fx
if shortportfolio_flag == True:
if product_strategy == 'SP_TRS':
if today_fx != 0.0:
mtm_exposure = mv_in_report_ccy - (settlement_amt * today_fx)
else:
mtm_exposure = 0.0
settlement_ccy = acm_ins.Currency().Name()
IA_ccy = report_ccy
# reverse the rate for indirect quotation pair
if report_ccy == 'USD' and ins_ccy not in ('AUD', 'EUR', 'GBP', 'NZD'):
today_fx = round(1.0 / today_fx, 6)
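    # Report RMB positions as offshore CNH rather than onshore CNY.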
if ins_ccy == 'CNY':
ins_ccy = 'CNH'
if settlement_ccy == 'CNY':
settlement_ccy = 'CNH'
if IA_ccy == 'CNY':
IA_ccy = 'CNH'
if loan_ccy == 'CNY':
loan_ccy = 'CNH'
if product_strategy == 'SP_Portfolio Swap' and fileperpty:
est_all_time_lvt = '0'
est_coll_trigger_lvt = '0'
est_terminate_trigger_lvt = '0'
rptRow = [counterparty_ptyid, str(contract_date),external_ref,'TRS',ins_name,underlying_bbg,ins_ccy,qty,avg_price,exe_notional,init_price,settlement_ccy,settlement_amt,
ins_ccy,str(today_underlying_price),original_mv,str(report_ccy),str(today_fx),mv_in_report_ccy,str(all_time_LTV*100.0)+'%',str(trigger_LTV*100.0)+'%',
str(terminate_LTV*100.0)+'%',str(mtm_lvt*100.0)+'%',margin_pool,mtm_exposure,est_all_time_lvt,est_coll_trigger_lvt,est_terminate_trigger_lvt]
else:
rptRow = [counterparty_ptyid, str(contract_date),external_ref,'TRS',ins_name,underlying_bbg,ins_ccy,qty,avg_price,exe_notional,init_price,settlement_ccy,settlement_amt,IA_ccy,ini_cash_bal,avail_cash_bal,
loan_ccy,loan_amount,ins_ccy,str(today_underlying_price),original_mv,str(report_ccy),str(today_fx),mv_in_report_ccy,str(all_time_LTV*100.0)+'%',str(trigger_LTV*100.0)+'%',
str(terminate_LTV*100.0)+'%',str(mtm_lvt*100.0)+'%',margin_pool,mtm_exposure,est_all_time_lvt,est_coll_trigger_lvt,est_terminate_trigger_lvt,client_to_pay,edd_int,pwm_int]
rptContent.append(rptRow)
rptContent.sort(report_compare)
report = ReportLayout(title, groupbypty, product_strategy, fileperpty)
font = HTI_ExcelReport2.Font()
font.bold = True
reportData = HTI_ExcelReport2.ReportData()
reportData.newSheet = True
if product_strategy == 'SP_Portfolio Swap' and fileperpty:
reportData.headerText = ['Counterparty', 'Contract Date','Contract No','TRS','Security Name','BBG Code','Currency','Quantity','Avg. Exe. Price','Exe. Notional','Init Price',
'Settlement (CCY)', 'Settlement Amount', 'Currency','Closing Price','MV','FX Currency','Today FX','MV','All Time LTV','Coll. Trigger LTV',
'Terminate LTV','MTM LTV','Margin Pool','MTM Exposure','Est. All Time LTV','Est. Coll. Trigger LTV','Est. Terminate LTV']
else:
reportData.headerText = ['Counterparty', 'Contract Date','Contract No','TRS','Security Name','BBG Code','Currency','Quantity','Avg. Exe. Price','Exe. Notional','Init Price',
'Settlement (CCY)', 'Settlement Amount', 'IA (CCY)', 'Ini. Cash Bal.','Avail. Cash Bal.',
'Loan Curr','Loan Amount','Currency','Closing Price','MV','FX Currency','Today FX','MV','All Time LTV','Coll. Trigger LTV',
'Terminate LTV','MTM LTV','Margin Pool','MTM Exposure','Est. All Time LTV','Est. Coll. Trigger LTV','Est. Terminate LTV','Cum. Income Client To Pay', 'Cum. EDD Net Inc.', 'Cum. PWM Net Inc.']
reportData.rows = rptContent
if groupbypty and product_strategy != 'SP_Portfolio Swap' and not fileperpty:
reportData.addGroup([Counterparty], {'FONT': font}, {'SUM': [Quantity,Exe_Notional,Ini_Cash_Bal,Avail_Cash_Bal,Loan_Amount,MV_CURR,MV_HKD,Margin_Pool,MTM_Exposure], 'COL_TEXT': [Counterparty, Closing_Price, Today_FX, All_Time_LTV, Coll_Trigger_LTV, Terminate_LTV], 'CUSTOM_TEXT': {'COL': [Currency1], 'TEXT': ['Total']}})
report.addReportData(reportData, {'SUM': [], 'COL_TEXT': [], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}})
if saveToFile:
if not saveToCSV:
            fileName = fileName + '.xlsx'
            try:
                if os.path.exists(fileName):
                    os.remove(fileName)
            except OSError:
                # ignore a stale file that cannot be removed; saving will surface any real error
                pass
if sendEmail and len(emailList) != 0:
report.saveNoQuit(fileName)
else:
report.save(fileName)
else:
fileName = fileName + '.csv'
if os.path.isfile(fileName):
os.remove(fileName)
csvData = []
csvData.append(reportData.headerText)
csvData = csvData + reportData.rows
print fileName
            # open before the try block so the finally clause never references an unbound name
            outPutFile = open(fileName, 'wb')
            try:
                csvWriter = csv.writer(outPutFile, delimiter=',', quotechar='"')
                for row in csvData:
                    csvWriter.writerow(row)
                outPutFile.flush()
            finally:
                outPutFile.close()
else:
report.show()
def getTRSUnderlying(acm_ins):
    acm_und_ins = None
for acm_leg in acm_ins.Legs():
if acm_leg.PayLeg() == False:
acm_und_ins = acm_leg.FloatRateReference()
break
return acm_und_ins
def getUndInstrumentBBTicker(acm_ins):
bbticker = ''
acm_und_ins = getTRSUnderlying(acm_ins)
if acm_und_ins != None:
        for alias in acm_und_ins.Aliases():
            if alias.Type().Name() == 'BB_TICKER':
                bbticker = alias.Alias().strip()
break
return bbticker
def productStrategy_TRS(acm_ins, asofdate):
if acm_ins.InsType() != 'TotalReturnSwap':
return None
acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <='%s'" % (acm_ins.Name(), ael.date(asofdate).add_days(1)))
if acm_trds != None:
for acm_trd in acm_trds:
return acm_trd.AdditionalInfo().Product_Strategy()
return None
class ReportLayout(HTI_ExcelReport2.CommonLayoutReport):
title = ''
Counterparty = 1
Contract_Date = 2
Contract_No = 3
TRS = 4
Security_Name = 5
BBG_Code = 6
Currency1 = 7
Quantity = 8
Avg_Exe_Price = 9
Exe_Notional = 10
Init_Price = 11
Settle_Ccy = 12
Settle_Amount = 13
IA_Ccy = 14
Ini_Cash_Bal = 15
Avail_Cash_Bal = 16
Loan_Curr = 17
Loan_Amount = 18
Currency2 = 19
Closing_Price = 20
MV_CURR = 21
FX_Currency = 22
Today_FX = 23
MV_HKD = 24
All_Time_LTV = 25
Coll_Trigger_LTV = 26
Terminate_LTV = 27
MTM_LVT = 28
Margin_Pool = 29
MTM_Exposure = 30
Est_All_Time_LTV = 31
Est_Coll_Trigger_LTV = 32
Est_Terminate_LTV = 33
Income_Client_to_Pay = 34
EDD_New_Income = 35
PWM_New_Income = 36
PSwap_Counterparty = 1
PSwap_Contract_Date = 2
PSwap_Contract_No = 3
PSwap_TRS = 4
PSwap_Security_Name = 5
PSwap_BBG_Code = 6
PSwap_Currency1 = 7
PSwap_Quantity = 8
PSwap_Avg_Exe_Price = 9
PSwap_Exe_Notional = 10
PSwap_Init_Price = 11
PSwap_Settle_Ccy = 12
PSwap_Settle_Amount = 13
PSwap_Currency2 = 14
PSwap_Closing_Price = 15
PSwap_MV_CURR = 16
PSwap_FX_Currency = 17
PSwap_Today_FX = 18
PSwap_MV_HKD = 19
PSwap_All_Time_LTV = 20
PSwap_Coll_Trigger_LTV = 21
PSwap_Terminate_LTV = 22
PSwap_MTM_LVT = 23
PSwap_Margin_Pool = 24
PSwap_MTM_Exposure = 25
PSwap_Est_All_Time_LTV = 26
PSwap_Est_Coll_Trigger_LTV = 27
PSwap_Est_Terminate_LTV = 28
PSwap_Income_Client_to_Pay = 29
PSwap_EDD_New_Income = 30
PSwap_PWM_New_Income = 31
groupbypty = True
product_strategy = 'SP_Portfolio Swap'
fileperpty = True
def __init__(self, title, groupbypty, product_strategy, fileperpty):
self.title = title
self.groupbypty = groupbypty
self.product_strategy = product_strategy
self.fileperpty = fileperpty
HTI_ExcelReport2.CommonLayoutReport.__init__(self)
def reportHeader(self, currentRow, reportIndex, excelApp):
# Write title
excelApp.Cells(currentRow[self.ROW], 1).Value = self.title
excelApp.Cells(currentRow[self.ROW], 1).Font.Bold = True
currentRow[self.ROW] = currentRow[self.ROW] + 1
HTI_ExcelReport2.CommonLayoutReport.reportHeader(self, currentRow, reportIndex, excelApp)
        # Apply the detailed-layout formats unless this is a per-party portfolio swap
        # file, matching the condition used when the report rows and headers are built.
        if not (self.product_strategy == 'SP_Portfolio Swap' and self.fileperpty):
excelApp.Columns(self.Quantity).NumberFormat = "#,##0.00"
excelApp.Columns(self.Avg_Exe_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.Exe_Notional).NumberFormat = "#,##0.00"
excelApp.Columns(self.Init_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.Ini_Cash_Bal).NumberFormat = "#,##0.00"
excelApp.Columns(self.Avail_Cash_Bal).NumberFormat = "#,##0.00"
excelApp.Columns(self.Loan_Amount).NumberFormat = "#,##0.00"
excelApp.Columns(self.Closing_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.MV_CURR).NumberFormat = "#,##0.00"
excelApp.Columns(self.Today_FX).NumberFormat = "#,##0.000000"
excelApp.Columns(self.MV_HKD).NumberFormat = "#,##0.00"
excelApp.Columns(self.Settle_Amount).NumberFormat = "#,##0.00"
excelApp.Columns(self.Margin_Pool).NumberFormat = "#,##0.00"
excelApp.Columns(self.MTM_Exposure).NumberFormat = "#,##0.00"
excelApp.Columns(self.Income_Client_to_Pay).NumberFormat = "#,##0.00"
excelApp.Columns(self.EDD_New_Income).NumberFormat = "#,##0.00"
excelApp.Columns(self.PWM_New_Income).NumberFormat = "#,##0.00"
excelApp.Columns(self.Est_All_Time_LTV).NumberFormat = "#,##0.00"
excelApp.Columns(self.Est_Coll_Trigger_LTV).NumberFormat = "#,##0.00"
excelApp.Columns(self.Est_Terminate_LTV).NumberFormat = "#,##0.00"
excelApp.Columns(self.Contract_Date).NumberFormat = "YYYY-MM-DD"
excelApp.Columns(self.All_Time_LTV).NumberFormat = "0.00%"
excelApp.Columns(self.Coll_Trigger_LTV).NumberFormat = "0.00%"
excelApp.Columns(self.Terminate_LTV).NumberFormat = "0.00%"
else:
excelApp.Columns(self.PSwap_Quantity).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Avg_Exe_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.PSwap_Exe_Notional).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Init_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.PSwap_Closing_Price).NumberFormat = "#,##0.000000"
excelApp.Columns(self.PSwap_MV_CURR).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Today_FX).NumberFormat = "#,##0.000000"
excelApp.Columns(self.PSwap_MV_HKD).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Settle_Amount).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Margin_Pool).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_MTM_Exposure).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Income_Client_to_Pay).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_EDD_New_Income).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_PWM_New_Income).NumberFormat = "#,##0.00"
excelApp.Columns(self.PSwap_Est_All_Time_LTV).NumberFormat = "0.00%"
excelApp.Columns(self.PSwap_Est_Coll_Trigger_LTV).NumberFormat = "0.00%"
excelApp.Columns(self.PSwap_Est_Terminate_LTV).NumberFormat = "0.00%"
excelApp.Columns(self.PSwap_Contract_Date).NumberFormat = "YYYY-MM-DD"
#excelApp.Columns(self.PSwap_All_Time_LTV).NumberFormat = "0.00%"
#excelApp.Columns(self.PSwap_Coll_Trigger_LTV).NumberFormat = "0.00%"
#excelApp.Columns(self.PSwap_Terminate_LTV).NumberFormat = "0.00%"
def groupFooter(self, currentRow, reportIndex, group, excelApp):
HTI_ExcelReport2.CommonLayoutReport.groupFooter(self, currentRow, reportIndex, group, excelApp)
if self.groupbypty and self.product_strategy != 'SP_Portfolio Swap':
excelApp.Cells(currentRow[self.ROW] - 2, Init_Price+1).Value = abs(float(excelApp.Cells(currentRow[self.ROW] - 2, Exe_Notional+1).Value) / float(excelApp.Cells(currentRow[self.ROW] - 2, Quantity+1).Value))
loan_amount = float(excelApp.Cells(currentRow[self.ROW] - 2, Loan_Amount+1).Value)
all_time_ltv = float(excelApp.Cells(currentRow[self.ROW] - 2, All_Time_LTV+1).Value)
coll_trigger_ltv = float(excelApp.Cells(currentRow[self.ROW] - 2, Coll_Trigger_LTV+1).Value)
terminate_ltv = float(excelApp.Cells(currentRow[self.ROW] - 2, Terminate_LTV+1).Value)
margin_pool = float(excelApp.Cells(currentRow[self.ROW] - 2, Margin_Pool+1).Value)
today_fx = float(excelApp.Cells(currentRow[self.ROW] - 2, Today_FX+1).Value)
avail_cash_bal = float(excelApp.Cells(currentRow[self.ROW] - 2, Avail_Cash_Bal+1).Value)
quantity = float(excelApp.Cells(currentRow[self.ROW] - 2, Quantity+1).Value)
mv_curr = float(excelApp.Cells(currentRow[self.ROW] - 2, MV_CURR+1).Value)
#loan_amount = float(excelApp.Cells(currentRow[self.ROW] - 2, Loan_Amount+1).Value)
if all_time_ltv != 0.0 and quantity != 0.0 and today_fx != 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_All_Time_LTV+1).Value = str(round(((loan_amount / all_time_ltv - margin_pool) / today_fx - avail_cash_bal) / quantity, 2))
if float(excelApp.Cells(currentRow[self.ROW] - 2, Est_All_Time_LTV+1).Value) == 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_All_Time_LTV+1).Value = ''
if coll_trigger_ltv != 0.0 and quantity != 0.0 and today_fx != 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_Coll_Trigger_LTV+1).Value = str(round(((loan_amount / coll_trigger_ltv - margin_pool) / today_fx - avail_cash_bal) / quantity, 2))
if float(excelApp.Cells(currentRow[self.ROW] - 2, Est_Coll_Trigger_LTV+1).Value) == 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_Coll_Trigger_LTV+1).Value = ''
if terminate_ltv != 0.0 and quantity != 0.0 and today_fx != 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_Terminate_LTV+1).Value = str(round(((loan_amount / terminate_ltv - margin_pool) / today_fx - avail_cash_bal) / quantity, 2))
if float(excelApp.Cells(currentRow[self.ROW] - 2, Est_Terminate_LTV+1).Value) == 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, Est_Terminate_LTV+1).Value = ''
val = (((mv_curr + avail_cash_bal)* today_fx) + margin_pool)
if val != 0.0:
mtm_lvt_ttl = loan_amount / val * 100.0
if mtm_lvt_ttl != 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, MTM_LVT+1).Value = str(mtm_lvt_ttl) + '%'
else:
init_price = float(excelApp.Cells(currentRow[self.ROW] - 2, Init_Price+1).Value)
cls_price = float(excelApp.Cells(currentRow[self.ROW] - 2, Closing_Price+1).Value)
if loan_amount == 0.0 and cls_price != 0.0:
excelApp.Cells(currentRow[self.ROW] - 2, MTM_LVT+1).Value = str(round(init_price / cls_price * 100.0, 2)) + '%'
else:
excelApp.Cells(currentRow[self.ROW] - 2, MTM_LVT+1).Value = ''
        # Requested by Ben Teng on 29/3/2017: no need to show sums for the columns below
excelApp.Cells(currentRow[self.ROW] - 2, FX_Currency+1).Value = ''
excelApp.Cells(currentRow[self.ROW] - 2, Init_Price+1).Value = ''
excelApp.Cells(currentRow[self.ROW] - 2, Today_FX+1).Value = ''
excelApp.Cells(currentRow[self.ROW] - 2, MTM_LVT+1).Value = ''
excelApp.Cells(currentRow[self.ROW] - 2, Closing_Price+1).Value = ''
def reportEnd(self, excelApp):
HTI_ExcelReport2.CommonLayoutReport.reportEnd(self, excelApp)
excelApp.Columns(self.Contract_Date).Select()
excelApp.Selection.HorizontalAlignment = HTI_ExcelReport2.ExcelConstant.xlLeft
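        # Remove the three cumulative income columns; after each delete the
        # remaining columns shift left, so the same index targets the next one.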
excelApp.Columns(self.Income_Client_to_Pay).Select()
excelApp.Selection.EntireColumn.Delete()
excelApp.Columns(self.Income_Client_to_Pay).Select()
excelApp.Selection.EntireColumn.Delete()
excelApp.Columns(self.Income_Client_to_Pay).Select()
excelApp.Selection.EntireColumn.Delete()
'''
excelApp.Columns("L:L").Select()
excelApp.Selection.HorizontalAlignment = HTI_ExcelReport2.ExcelConstant.xlRight
'''
excelApp.Cells(1, 1).Select()
import contextlib
import time
import unittest
from datetime import date, datetime
from django.core.exceptions import FieldError
from django.db import connection, models
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import Article, Author, MySQLUnixTimestamp
@contextlib.contextmanager
def register_lookup(field, *lookups):
try:
for lookup in lookups:
field.register_lookup(lookup)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup)
class Div3Lookup(models.Lookup):
lookup_name = 'div3'
def as_sql(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
return '(%s) %%%% 3 = %s' % (lhs, rhs), params
def as_oracle(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
return 'mod(%s, 3) = %s' % (lhs, rhs), params
class Div3Transform(models.Transform):
lookup_name = 'div3'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return '(%s) %%%% 3' % lhs, lhs_params
def as_oracle(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return 'mod(%s, 3)' % lhs, lhs_params
class Div3BilateralTransform(Div3Transform):
bilateral = True
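# Bilateral transforms are applied to both sides of the comparison, e.g.
# age__div3=2 compares (age % 3) with (2 % 3).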
class Mult3BilateralTransform(models.Transform):
bilateral = True
lookup_name = 'mult3'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return '3 * (%s)' % lhs, lhs_params
class UpperBilateralTransform(models.Transform):
bilateral = True
lookup_name = 'upper'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return 'UPPER(%s)' % lhs, lhs_params
class YearTransform(models.Transform):
# Use a name that avoids collision with the built-in year lookup.
lookup_name = 'testyear'
def as_sql(self, compiler, connection):
lhs_sql, params = compiler.compile(self.lhs)
return connection.ops.date_extract_sql('year', lhs_sql), params
@property
def output_field(self):
return models.IntegerField()
@YearTransform.register_lookup
class YearExact(models.lookups.Lookup):
lookup_name = 'exact'
def as_sql(self, compiler, connection):
# We will need to skip the extract part, and instead go
# directly with the originating field, that is self.lhs.lhs
lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
# Note that we must be careful so that we have params in the
# same order as we have the parts in the SQL.
params = lhs_params + rhs_params + lhs_params + rhs_params
# We use PostgreSQL specific SQL here. Note that we must do the
# conversions in SQL instead of in Python to support F() references.
return ("%(lhs)s >= (%(rhs)s || '-01-01')::date "
"AND %(lhs)s <= (%(rhs)s || '-12-31')::date" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
@YearTransform.register_lookup
class YearLte(models.lookups.LessThanOrEqual):
"""
The purpose of this lookup is to efficiently compare the year of the field.
"""
def as_sql(self, compiler, connection):
# Skip the YearTransform above us (no possibility for efficient
# lookup otherwise).
real_lhs = self.lhs.lhs
lhs_sql, params = self.process_lhs(compiler, connection, real_lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
# Build SQL where the integer year is concatenated with last month
# and day, then convert that to date. (We try to have SQL like:
# WHERE somecol <= '2013-12-31')
# but also make it work if the rhs_sql is field reference.
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params
class Exactly(models.lookups.Exact):
"""
This lookup is used to test lookup registration.
"""
lookup_name = 'exactly'
def get_rhs_op(self, connection, rhs):
return connection.operators['exact'] % rhs
class SQLFuncMixin:
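    # Render the dynamic lookup/transform suffix as a function call, with the
    # name bound as a query parameter (e.g. "monkeys()" once substituted).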
def as_sql(self, compiler, connection):
return '%s()', [self.name]
@property
def output_field(self):
return CustomField()
class SQLFuncLookup(SQLFuncMixin, models.Lookup):
def __init__(self, name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
class SQLFuncTransform(SQLFuncMixin, models.Transform):
def __init__(self, name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
class SQLFuncFactory:
def __init__(self, key, name):
self.key = key
self.name = name
def __call__(self, *args, **kwargs):
if self.key == 'lookupfunc':
return SQLFuncLookup(self.name, *args, **kwargs)
return SQLFuncTransform(self.name, *args, **kwargs)
class CustomField(models.TextField):
def get_lookup(self, lookup_name):
if lookup_name.startswith('lookupfunc_'):
key, name = lookup_name.split('_', 1)
return SQLFuncFactory(key, name)
return super().get_lookup(lookup_name)
def get_transform(self, lookup_name):
if lookup_name.startswith('transformfunc_'):
key, name = lookup_name.split('_', 1)
return SQLFuncFactory(key, name)
return super().get_transform(lookup_name)
class CustomModel(models.Model):
field = CustomField()
# We will register this class temporarily in the test method.
class InMonth(models.lookups.Lookup):
"""
InMonth matches if the column's month is the same as value's month.
"""
lookup_name = 'inmonth'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# We need to be careful so that we get the params in right
# places.
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%s >= date_trunc('month', %s) and "
"%s < date_trunc('month', %s) + interval '1 months'" %
(lhs, rhs, lhs, rhs), params)
class DateTimeTransform(models.Transform):
lookup_name = 'as_datetime'
@property
def output_field(self):
return models.DateTimeField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'from_unixtime({})'.format(lhs), params
class LookupTests(TestCase):
def test_custom_name_lookup(self):
a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
custom_lookup_name = 'isactually'
custom_transform_name = 'justtheyear'
try:
models.DateField.register_lookup(YearTransform)
models.DateField.register_lookup(YearTransform, custom_transform_name)
YearTransform.register_lookup(Exactly)
YearTransform.register_lookup(Exactly, custom_lookup_name)
qs1 = Author.objects.filter(birthdate__testyear__exactly=1981)
qs2 = Author.objects.filter(birthdate__justtheyear__isactually=1981)
self.assertSequenceEqual(qs1, [a1])
self.assertSequenceEqual(qs2, [a1])
finally:
YearTransform._unregister_lookup(Exactly)
YearTransform._unregister_lookup(Exactly, custom_lookup_name)
models.DateField._unregister_lookup(YearTransform)
models.DateField._unregister_lookup(YearTransform, custom_transform_name)
def test_basic_lookup(self):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
with register_lookup(models.IntegerField, Div3Lookup):
self.assertSequenceEqual(Author.objects.filter(age__div3=0), [a3])
self.assertSequenceEqual(Author.objects.filter(age__div3=1).order_by('age'), [a1, a4])
self.assertSequenceEqual(Author.objects.filter(age__div3=2), [a2])
self.assertSequenceEqual(Author.objects.filter(age__div3=3), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_birthdate_month(self):
a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
with register_lookup(models.DateField, InMonth):
self.assertSequenceEqual(Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)), [a3])
self.assertSequenceEqual(Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)), [a2])
self.assertSequenceEqual(Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)), [a1])
self.assertSequenceEqual(Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)), [a4])
self.assertSequenceEqual(Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)), [])
def test_div3_extract(self):
with register_lookup(models.IntegerField, Div3Transform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a1, a2, a3, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [])
self.assertSequenceEqual(baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4])
def test_foreignobject_lookup_registration(self):
field = Article._meta.get_field('author')
with register_lookup(models.ForeignObject, Exactly):
self.assertIs(field.get_lookup('exactly'), Exactly)
# ForeignObject should ignore regular Field lookups
with register_lookup(models.Field, Exactly):
self.assertIsNone(field.get_lookup('exactly'))
def test_lookups_caching(self):
field = Article._meta.get_field('author')
# clear and re-cache
field.get_lookups.cache_clear()
self.assertNotIn('exactly', field.get_lookups())
# registration should bust the cache
with register_lookup(models.ForeignObject, Exactly):
# getting the lookups again should re-cache
self.assertIn('exactly', field.get_lookups())
class BilateralTransformTests(TestCase):
def test_bilateral_upper(self):
with register_lookup(models.CharField, UpperBilateralTransform):
Author.objects.bulk_create([
Author(name='Doe'),
Author(name='doe'),
Author(name='Foo'),
])
self.assertQuerysetEqual(
Author.objects.filter(name__upper='doe'),
["<Author: Doe>", "<Author: doe>"], ordered=False)
self.assertQuerysetEqual(
Author.objects.filter(name__upper__contains='f'),
["<Author: Foo>"], ordered=False)
def test_bilateral_inner_qs(self):
with register_lookup(models.CharField, UpperBilateralTransform):
msg = 'Bilateral transformations on nested querysets are not supported.'
with self.assertRaisesMessage(NotImplementedError, msg):
Author.objects.filter(name__upper__in=Author.objects.values_list('name'))
def test_bilateral_multi_value(self):
with register_lookup(models.CharField, UpperBilateralTransform):
Author.objects.bulk_create([
Author(name='Foo'),
Author(name='Bar'),
Author(name='Ray'),
])
self.assertQuerysetEqual(
Author.objects.filter(name__upper__in=['foo', 'bar', 'doe']).order_by('name'),
['Bar', 'Foo'],
lambda a: a.name
)
def test_div3_bilateral_extract(self):
with register_lookup(models.IntegerField, Div3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a1, a2, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [a1, a2, a3, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4])
def test_bilateral_order(self):
with register_lookup(models.IntegerField, Mult3BilateralTransform, Div3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
# mult3__div3 always leads to 0
self.assertSequenceEqual(baseqs.filter(age__mult3__div3=42), [a1, a2, a3, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__mult3=42), [a3])
def test_bilateral_fexpr(self):
with register_lookup(models.IntegerField, Mult3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1, average_rating=3.2)
a2 = Author.objects.create(name='a2', age=2, average_rating=0.5)
a3 = Author.objects.create(name='a3', age=3, average_rating=1.5)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertSequenceEqual(baseqs.filter(age__mult3=models.F('age')), [a1, a2, a3, a4])
# Same as age >= average_rating
self.assertSequenceEqual(baseqs.filter(age__mult3__gte=models.F('average_rating')), [a2, a3])
@override_settings(USE_TZ=True)
class DateTimeLookupTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific SQL used")
def test_datetime_output_field(self):
with register_lookup(models.PositiveIntegerField, DateTimeTransform):
ut = MySQLUnixTimestamp.objects.create(timestamp=time.time())
y2k = timezone.make_aware(datetime(2000, 1, 1))
self.assertSequenceEqual(MySQLUnixTimestamp.objects.filter(timestamp__as_datetime__gt=y2k), [ut])
class YearLteTests(TestCase):
def setUp(self):
models.DateField.register_lookup(YearTransform)
self.a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
self.a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
self.a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
self.a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
def tearDown(self):
models.DateField._unregister_lookup(YearTransform)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_year_lte(self):
baseqs = Author.objects.order_by('name')
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lte=2012), [self.a1, self.a2, self.a3, self.a4])
self.assertSequenceEqual(baseqs.filter(birthdate__testyear=2012), [self.a2, self.a3, self.a4])
self.assertNotIn('BETWEEN', str(baseqs.filter(birthdate__testyear=2012).query))
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lte=2011), [self.a1])
# The non-optimized version works, too.
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lt=2012), [self.a1])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_year_lte_fexpr(self):
self.a2.age = 2011
self.a2.save()
self.a3.age = 2012
self.a3.save()
self.a4.age = 2013
self.a4.save()
baseqs = Author.objects.order_by('name')
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lte=models.F('age')), [self.a3, self.a4])
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lt=models.F('age')), [self.a4])
def test_year_lte_sql(self):
# This test will just check the generated SQL for __lte. This
# doesn't require running on PostgreSQL and spots the most likely
# error - not running YearLte SQL at all.
baseqs = Author.objects.order_by('name')
self.assertIn(
'<= (2011 || ', str(baseqs.filter(birthdate__testyear__lte=2011).query))
self.assertIn(
'-12-31', str(baseqs.filter(birthdate__testyear__lte=2011).query))
def test_postgres_year_exact(self):
baseqs = Author.objects.order_by('name')
self.assertIn(
'= (2011 || ', str(baseqs.filter(birthdate__testyear=2011).query))
self.assertIn(
'-12-31', str(baseqs.filter(birthdate__testyear=2011).query))
def test_custom_implementation_year_exact(self):
try:
# Two ways to add a customized implementation for different backends:
# First is MonkeyPatch of the class.
def as_custom_sql(self, compiler, connection):
lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%(lhs)s >= str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
setattr(YearExact, 'as_' + connection.vendor, as_custom_sql)
self.assertIn(
'concat(',
str(Author.objects.filter(birthdate__testyear=2012).query))
finally:
delattr(YearExact, 'as_' + connection.vendor)
try:
# The other way is to subclass the original lookup and register the subclassed
# lookup instead of the original.
class CustomYearExact(YearExact):
# This method should be named "as_mysql" for MySQL, "as_postgresql" for postgres
# and so on, but as we don't know which DB we are running on, we need to use
# setattr.
def as_custom_sql(self, compiler, connection):
lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%(lhs)s >= str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
setattr(CustomYearExact, 'as_' + connection.vendor, CustomYearExact.as_custom_sql)
YearTransform.register_lookup(CustomYearExact)
self.assertIn(
'CONCAT(',
str(Author.objects.filter(birthdate__testyear=2012).query))
finally:
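            # CustomYearExact inherits lookup_name 'exact', so registering it
            # replaced YearExact; restore the original lookup afterwards.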
YearTransform._unregister_lookup(CustomYearExact)
YearTransform.register_lookup(YearExact)
class TrackCallsYearTransform(YearTransform):
# Use a name that avoids collision with the built-in year lookup.
lookup_name = 'testyear'
call_order = []
def as_sql(self, compiler, connection):
lhs_sql, params = compiler.compile(self.lhs)
return connection.ops.date_extract_sql('year', lhs_sql), params
@property
def output_field(self):
return models.IntegerField()
def get_lookup(self, lookup_name):
self.call_order.append('lookup')
return super().get_lookup(lookup_name)
def get_transform(self, lookup_name):
self.call_order.append('transform')
return super().get_transform(lookup_name)
class LookupTransformCallOrderTests(TestCase):
def test_call_order(self):
with register_lookup(models.DateField, TrackCallsYearTransform):
# junk lookup - tries lookup, then transform, then fails
msg = "Unsupported lookup 'junk' for IntegerField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(birthdate__testyear__junk=2012)
self.assertEqual(TrackCallsYearTransform.call_order,
['lookup', 'transform'])
TrackCallsYearTransform.call_order = []
# junk transform - tries transform only, then fails
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(birthdate__testyear__junk__more_junk=2012)
self.assertEqual(TrackCallsYearTransform.call_order,
['transform'])
TrackCallsYearTransform.call_order = []
# Just getting the year (implied __exact) - lookup only
Author.objects.filter(birthdate__testyear=2012)
self.assertEqual(TrackCallsYearTransform.call_order,
['lookup'])
TrackCallsYearTransform.call_order = []
# Just getting the year (explicit __exact) - lookup only
Author.objects.filter(birthdate__testyear__exact=2012)
self.assertEqual(TrackCallsYearTransform.call_order,
['lookup'])
class CustomisedMethodsTests(TestCase):
def test_overridden_get_lookup(self):
q = CustomModel.objects.filter(field__lookupfunc_monkeys=3)
self.assertIn('monkeys()', str(q.query))
def test_overridden_get_transform(self):
q = CustomModel.objects.filter(field__transformfunc_banana=3)
self.assertIn('banana()', str(q.query))
def test_overridden_get_lookup_chain(self):
q = CustomModel.objects.filter(field__transformfunc_banana__lookupfunc_elephants=3)
self.assertIn('elephants()', str(q.query))
def test_overridden_get_transform_chain(self):
q = CustomModel.objects.filter(field__transformfunc_banana__transformfunc_pear=3)
self.assertIn('pear()', str(q.query))
class SubqueryTransformTests(TestCase):
def test_subquery_usage(self):
with register_lookup(models.IntegerField, Div3Transform):
Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
Author.objects.create(name='a3', age=3)
Author.objects.create(name='a4', age=4)
qs = Author.objects.order_by('name').filter(id__in=Author.objects.filter(age__div3=2))
self.assertSequenceEqual(qs, [a2])
from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
def test_create_with_password_mismatch(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password1",
'password2': "password2",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
    def test_nonexistent_user_returns_404(self):
        self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
    def post(self, post_data=None):
        # Copy the input so the caller's dict (and the default) are never mutated.
        post_data = dict(post_data or {})
        post_defaults = {
            'page_permissions-TOTAL_FORMS': ['0'],
            'page_permissions-MAX_NUM_FORMS': ['1000'],
            'page_permissions-INITIAL_FORMS': ['0'],
        }
        for k, v in six.iteritems(post_defaults):
            post_data[k] = post_data.get(k, v)
        return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
        # Check that the group was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
        self.assertFalse(response.context['formset'].errors[0])
        self.assertTrue(response.context['formset'].errors[1])
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
    def post(self, post_data=None, group_id=None):
        # Copy the input so the caller's dict (and the default) are never mutated.
        post_data = dict(post_data or {})
        post_defaults = {
            'name': 'test group',
            'permissions': [self.existing_permission.id],
            'page_permissions-TOTAL_FORMS': ['1'],
            'page_permissions-MAX_NUM_FORMS': ['1000'],
            'page_permissions-INITIAL_FORMS': ['1'],  # as we have one page permission already
            'page_permissions-0-id': [self.root_add_permission.id],
            'page_permissions-0-page': [self.root_add_permission.page.id],
            'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
        }
        for k, v in six.iteritems(post_defaults):
            post_data[k] = post_data.get(k, v)
        return self.client.post(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
    def test_nonexistent_group_returns_404(self):
        self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
        # The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
# The test group now has two page permissions
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
        self.assertFalse(response.context['formset'].errors[0])
        self.assertTrue(response.context['formset'].errors[1])
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(
response.context['form'].initial.get('permissions'),
list(original_permissions.values_list('id', flat=True))
)
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
        # submit the form with no changes (only submitting the existing
        # permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
"""Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import datetime
import os
import threading
from unittest.mock import AsyncMock, Mock, patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, setup
import homeassistant.config as config_util
from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockModule,
MockPlatform,
assert_setup_component,
get_test_config_dir,
get_test_home_assistant,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def mock_handlers():
"""Mock config flows."""
class MockFlowHandler(config_entries.ConfigFlow):
"""Define a mock flow handler."""
VERSION = 1
with patch.dict(config_entries.HANDLERS, {"comp": MockFlowHandler}):
yield
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True)
mock_integration(
self.hass, MockModule("comp_conf", config_schema=config_schema)
)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass, "comp_conf", {"comp_conf": None}
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass,
"comp_conf",
{"comp_conf": {"hello": "world", "invalid": "extra"}},
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(
self.hass, "comp_conf", {"comp_conf": {"hello": "world"}}
)
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({})
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "not_existing", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "whatever", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": [{"platform": "whatever", "hello": "world"}]},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": None}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": {}}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({"hello": "world"})
mock_integration(
self.hass,
MockModule(
"platform_conf",
platform_schema=platform_schema,
platform_schema_base=platform_schema_base,
),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema_base
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str})
platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"})
mock_integration(
self.hass, MockModule("platform_conf", platform_schema=component_schema)
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=component_schema),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
"platform_conf": {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
"platform": "whatever",
"entity_namespace": "yummy",
}
},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, "non_existing", {}) is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = Mock(return_value=True)
mock_integration(self.hass, MockModule("comp", setup=mock_setup))
assert setup.setup_component(self.hass, "comp", {})
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, "comp", {})
assert not mock_setup.called
@patch("homeassistant.util.package.install_package", return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
async def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(self.hass, MockModule("comp", async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, "comp", {})
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, "comp", {})
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ["maybe_existing"]
mock_integration(self.hass, MockModule("comp", dependencies=deps))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule("comp2", dependencies=deps))
mock_integration(self.hass, MockModule("maybe_existing"))
assert setup.setup_component(self.hass, "comp2", {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass, MockModule("comp", setup=lambda hass, config: False)
)
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception("fail!")
mock_integration(self.hass, MockModule("comp", setup=exception_setup))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get("comp_a", {}).get("valid", False):
return True
raise Exception(f"Config not passed in: {config}")
platform = MockPlatform()
mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup))
mock_integration(
self.hass,
MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]),
)
mock_entity_platform(self.hass, "switch.platform_a", platform)
setup.setup_component(
self.hass,
"switch",
{"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}},
)
self.hass.block_till_done()
assert "comp_a" in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend(
{"valid": True}, extra=vol.PREVENT_EXTRA
)
mock_setup = Mock(spec_set=True)
mock_entity_platform(
self.hass,
"switch.platform_a",
MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup),
)
with assert_setup_component(0, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "invalid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"switch",
{
"switch": {
"platform": "platform_a",
"valid": True,
"invalid_extra": True,
}
},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(1, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "valid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: None)
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule("disabled_component", setup=lambda hass, config: False),
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: True)
)
assert setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
async def component1_setup(hass, config):
"""Set up mock component."""
await discovery.async_discover(
hass, "test_component2", {}, "test_component2", {}
)
await discovery.async_discover(
hass, "test_component3", {}, "test_component3", {}
)
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", async_setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component_track_setup)
)
mock_integration(
self.hass, MockModule("test_component3", setup=component_track_setup)
)
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
async def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule("test_component1"))
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
async def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA)
)
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert len(mock_call.mock_calls) == 0
async def test_platform_error_slow_setup(hass, caplog):
"""Don't block startup more than SLOW_SETUP_MAX_WAIT."""
with patch.object(setup, "SLOW_SETUP_MAX_WAIT", 1):
called = []
async def async_setup(*args):
"""Tracking Setup."""
called.append(1)
await asyncio.sleep(2)
mock_integration(hass, MockModule("test_component1", async_setup=async_setup))
result = await setup.async_setup_component(hass, "test_component1", {})
assert len(called) == 1
assert not result
assert "test_component1 is taking longer than 1 seconds" in caplog.text
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
async def test_async_when_setup_or_start_already_loaded(hass):
"""Test when setup or start."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup_or_start(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup_or_start(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
setup.async_when_setup_or_start(hass, "not_loaded", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert calls == ["test", "test", "not_loaded"]
async def test_setup_import_blows_up(hass):
"""Test that we handle it correctly when importing integration blows up."""
with patch(
"homeassistant.loader.Integration.get_component", side_effect=ValueError
):
assert not await setup.async_setup_component(hass, "sun", {})
async def test_parallel_entry_setup(hass):
"""Test config entries are set up in parallel."""
MockConfigEntry(domain="comp", data={"value": 1}).add_to_hass(hass)
MockConfigEntry(domain="comp", data={"value": 2}).add_to_hass(hass)
calls = []
async def mock_async_setup_entry(hass, entry):
"""Mock setting up an entry."""
calls.append(entry.data["value"])
await asyncio.sleep(0)
calls.append(entry.data["value"])
return True
mock_integration(
hass,
MockModule(
"comp",
async_setup_entry=mock_async_setup_entry,
),
)
mock_entity_platform(hass, "config_flow.comp", None)
await setup.async_setup_component(hass, "comp", {})
assert calls == [1, 2, 1, 2]
async def test_integration_disabled(hass, caplog):
"""Test we can disable an integration."""
disabled_reason = "Dependency contains code that breaks Home Assistant"
mock_integration(
hass,
MockModule("test_component1", partial_manifest={"disabled": disabled_reason}),
)
result = await setup.async_setup_component(hass, "test_component1", {})
assert not result
assert disabled_reason in caplog.text
async def test_async_get_loaded_integrations(hass):
"""Test we can enumerate loaded integations."""
hass.config.components.add("notbase")
hass.config.components.add("switch")
hass.config.components.add("notbase.switch")
hass.config.components.add("myintegration")
hass.config.components.add("device_tracker")
hass.config.components.add("device_tracker.other")
hass.config.components.add("myintegration.light")
assert setup.async_get_loaded_integrations(hass) == {
"other",
"switch",
"notbase",
"myintegration",
"device_tracker",
}
async def test_integration_no_setup(hass, caplog):
"""Test we fail integration setup without setup functions."""
mock_integration(
hass,
MockModule("test_integration_without_setup", setup=False),
)
result = await setup.async_setup_component(
hass, "test_integration_without_setup", {}
)
assert not result
assert "No setup or config entry setup function defined" in caplog.text
async def test_integration_only_setup_entry(hass):
"""Test we have an integration with only a setup entry method."""
mock_integration(
hass,
MockModule(
"test_integration_only_entry",
setup=False,
async_setup_entry=AsyncMock(return_value=True),
),
)
assert await setup.async_setup_component(hass, "test_integration_only_entry", {})
async def test_async_start_setup(hass):
"""Test setup started context manager keeps track of setup times."""
with setup.async_start_setup(hass, ["august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["august"], datetime.datetime
)
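        # Re-entering setup for the same integration records a suffixed key.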
with setup.async_start_setup(hass, ["august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["august_2"], datetime.datetime
)
assert "august" not in hass.data[setup.DATA_SETUP_STARTED]
assert isinstance(hass.data[setup.DATA_SETUP_TIME]["august"], datetime.timedelta)
assert "august_2" not in hass.data[setup.DATA_SETUP_TIME]
async def test_async_start_setup_platforms(hass):
"""Test setup started context manager keeps track of setup times for platforms."""
with setup.async_start_setup(hass, ["sensor.august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["sensor.august"], datetime.datetime
)
assert "august" not in hass.data[setup.DATA_SETUP_STARTED]
assert isinstance(hass.data[setup.DATA_SETUP_TIME]["august"], datetime.timedelta)
assert "sensor" not in hass.data[setup.DATA_SETUP_TIME]
|
|
from .. import names
from ..builder import Builder
from ..ndtypes import (ScalarT, NoneT, NoneType, ArrayT, SliceT, TupleT, make_tuple_type,
Int64, PtrT, ptr_type, ClosureT, FnT, StructT,
TypeValueT)
from ..syntax import (Var, Tuple,
Index, TypedFn, Return, Stmt, Assign, Alloc,
ParFor, PrimCall, If, While, ForLoop, Call, Expr,
IndexReduce, ExprStmt)
from ..syntax.helpers import none, const_int
from transform import Transform
def concat(seqs):
result = []
for seq in seqs:
for elt in seq:
result.append(elt)
return tuple(result)
def concat_args(*seqs):
return concat(seqs)
def concat_map(f, seq):
return concat(f(elt) for elt in seq)
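# Examples (directly derivable from the helpers above):
#   concat([(1, 2), (3,)])               == (1, 2, 3)
#   concat_map(lambda x: (x, x), [1, 2]) == (1, 1, 2, 2)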
def flatten_type(t):
if isinstance(t, (ScalarT, PtrT)):
return (t,)
elif isinstance(t, TupleT):
return concat(flatten_type(elt_t) for elt_t in t.elt_types)
elif isinstance(t, (NoneT, FnT, TypeValueT)):
return ()
elif isinstance(t, ClosureT):
return concat(flatten_type(elt_t) for elt_t in t.arg_types)
elif isinstance(t, ArrayT):
return concat_args(
flatten_type(t.ptr_t),
flatten_type(t.shape_t),
flatten_type(t.strides_t),
(Int64, Int64), # offset and size
)
elif isinstance(t, SliceT):
    return flatten_types((t.start_type, t.stop_type, t.step_type))
else:
assert False, "Unsupported type %s" % (t,)
def flatten_types(ts):
return concat([flatten_type(t) for t in ts])
def field_pos_range(t, field, _cache = {}):
key = (t, field)
if key in _cache:
return _cache[key]
assert isinstance(t, StructT), "Expected struct got %s.%s" % (t, field)
offset = 0
for i, (field_name, field_t) in enumerate(t._fields_):
n = len(flatten_type(field_t))
if field_name == field or (isinstance(field, (int, long)) and i == field):
result = (offset, offset+n)
_cache[key] = result
return result
offset += n
assert False, "Field %s not found on type %s" % (field, t)
def get_field_elts(t, values, field):
start, stop = field_pos_range(t, field)
assert stop <= len(values), \
"Insufficient number of flattened fields %s for %s.%s" % (values, t, field)
return values[start:stop]
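# Example for get_field_elts: with the flattened array field order used
# throughout this module, (data, shape..., strides..., offset, size),
# get_field_elts(array_t, values, 'shape') slices out just the shape elements.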
def single_type(ts):
"""
Turn a sequence of types into a single type object
"""
if len(ts) == 0:
return NoneType
elif len(ts) == 1:
return ts[0]
else:
return make_tuple_type(ts)
def single_value(values):
if len(values) == 0:
return none
elif len(values) == 1:
return values[0]
else:
t = make_tuple_type(tuple(v.type for v in values))
return Tuple(values, type = t)
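# single_type(()) == NoneType, single_type((Int64,)) == Int64, and longer
# sequences become a tuple type, mirroring single_value for runtime values.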
def mk_vars(names, types):
"""
Combine a list of names and a list of types into a single list of vars
"""
return [Var(name = name, type = t) for name, t in zip(names, types)]
class BuildFlatFn(Builder):
def __init__(self, old_fn):
Builder.__init__(self)
self.old_fn = old_fn
base_name = names.original(old_fn.name)
flat_fn_name = names.fresh("flat_" + base_name)
self.type_env = {}
for (name, t) in old_fn.type_env.iteritems():
old_var = Var(name = name, type = t)
for new_var in self.flatten_lhs_var(old_var):
self.type_env[new_var.name] = new_var.type
old_input_vars = mk_vars(old_fn.arg_names, old_fn.input_types)
# a var of struct type from the old fn
# maps to multiple variables in the new
# function
self.var_expansions = {}
new_input_vars = []
for var in old_input_vars:
flat_vars = self.flatten_lhs_var(var)
self.var_expansions[var.name] = flat_vars
new_input_vars.extend(flat_vars)
new_input_names = tuple(var.name for var in new_input_vars)
new_input_types = tuple(var.type for var in new_input_vars)
new_return_type = single_type(flatten_type(old_fn.return_type))
self.flat_fn = \
TypedFn(name = flat_fn_name,
arg_names = new_input_names,
body = [],
type_env = self.type_env,
input_types = new_input_types,
return_type = new_return_type,
created_by = self)
def run(self):
self.flat_fn.body = self.flatten_block(self.old_fn.body)
return self.flat_fn
#######################
#
# Helpers
#
#######################
def flatten_block(self, stmts):
self.blocks.push()
for stmt in stmts:
result = self.flatten_stmt(stmt)
if result is None:
continue
if not isinstance(result, (list,tuple)):
result = [result]
for new_stmt in result:
assert isinstance(new_stmt, Stmt), "Expected statement, got %s" % (new_stmt,)
self.blocks.append(new_stmt)
return self.blocks.pop()
#######################
#
# Statements
#
#######################
#def bind_name(self):
#def bind_var(self, lhs, rhs):
def flatten_Assign(self, stmt):
c = stmt.lhs.__class__
rhs = self.flatten_expr(stmt.rhs)
if c is Var:
lhs_vars = self.flatten_lhs_var(stmt.lhs)
self.var_expansions[stmt.lhs.name] = lhs_vars
if isinstance(rhs, Expr):
if len(lhs_vars) == 1:
return [Assign(lhs_vars[0], rhs)]
else:
# the IR doesn't allow for multiple assignment
# so we fake it with tuple literals
return [Assign(self.tuple(lhs_vars), rhs)]
assert isinstance(rhs, (list,tuple))
assert len(lhs_vars) == len(rhs), \
"Mismatch between LHS %s and RHS %s : %s => %s in stmt %s" % \
(lhs_vars, stmt.rhs, stmt.rhs.type, rhs, stmt)
result = []
for var, value in zip(lhs_vars, rhs):
result.append(Assign(var, value))
return result
elif c is Index:
array_t = stmt.lhs.value.type
if isinstance(array_t, PtrT):
return stmt
indices = self.flatten_expr(stmt.lhs.index)
values = self.flatten_expr(stmt.lhs.value)
data = get_field_elts(array_t, values, 'data')[0]
shape = get_field_elts(array_t, values, 'shape')
strides = get_field_elts(array_t, values, 'strides')
offset = get_field_elts(array_t, values, 'offset')[0]
n_dims = len(strides)
n_indices = len(indices)
assert n_dims == n_indices, \
"Expected %d indices but only got %d in %s" % (n_dims, n_indices, stmt)
for idx, stride in zip(indices, strides):
offset = self.add(offset, self.mul(idx, stride))
stmt.lhs = self.index(data, offset, temp=False)
stmt.rhs = rhs[0]
return [stmt]
else:
assert False, "LHS not supported in flattening: %s" % stmt
def enter_branch(self, phi_nodes):
for (k, (left, _)) in phi_nodes.iteritems():
self.var_expansions[k] = self.flatten_lhs_name(k, left.type)
def flatten_merge(self, phi_nodes):
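    # phi_nodes maps each branch/loop variable to its (left, right) incoming values.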
result = {}
for (k, (left, right)) in phi_nodes.iteritems():
t = left.type
assert right.type == t
if isinstance(t, (ScalarT, PtrT)):
result[k] = (self.flatten_scalar_expr(left), self.flatten_scalar_expr(right))
elif isinstance(t, (FnT, NoneT)):
continue
else:
fields = self.var_expansions[k]
flat_left = self.flatten_expr(left)
flat_right = self.flatten_expr(right)
assert len(fields) == len(flat_left)
assert len(fields) == len(flat_right)
for i, var in enumerate(fields):
result[var.name] = (flat_left[i], flat_right[i])
return result
def flatten_ForLoop(self, stmt):
self.enter_branch(stmt.merge)
var = self.flatten_scalar_lhs_var(stmt.var)
start = self.flatten_scalar_expr(stmt.start)
stop = self.flatten_scalar_expr(stmt.stop)
step = self.flatten_scalar_expr(stmt.step)
body = self.flatten_block(stmt.body)
merge = self.flatten_merge(stmt.merge)
return ForLoop(var, start, stop, step, body, merge)
def flatten_While(self, stmt):
self.enter_branch(stmt.merge)
cond = self.flatten_scalar_expr(stmt.cond)
body = self.flatten_block(stmt.body)
merge = self.flatten_merge(stmt.merge)
return While(cond, body, merge)
def flatten_If(self, stmt):
self.enter_branch(stmt.merge)
cond = self.flatten_scalar_expr(stmt.cond)
true = self.flatten_block(stmt.true)
false = self.flatten_block(stmt.false)
merge = self.flatten_merge(stmt.merge)
assert merge is not None
return If(cond, true, false, merge = merge)
def flatten_ExprStmt(self, stmt):
return ExprStmt(value = self.flatten_expr(stmt.value))
def flatten_ParFor(self, stmt):
new_fn, closure_elts = self.flatten_fn(stmt.fn)
closure = self.closure(new_fn, closure_elts)
bounds = single_value(self.flatten_expr(stmt.bounds))
return ParFor(fn = closure, bounds = bounds)
def flatten_Return(self, stmt):
return Return(single_value(self.flatten_expr(stmt.value)))
def flatten_Comment(self, stmt):
return stmt
def flatten_stmt(self, stmt):
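    # Dynamic dispatch on the statement class: Assign -> flatten_Assign, If -> flatten_If, etc.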
method_name = "flatten_%s" % stmt.__class__.__name__
return getattr(self, method_name)(stmt)
#######################
#
# Expressions
#
#######################
def flatten_expr(self, expr):
method_name = "flatten_%s" % expr.__class__.__name__
return getattr(self, method_name)(expr)
def flatten_expr_list(self, exprs):
return concat_map(self.flatten_expr, exprs)
def flatten_scalar_expr(self, expr):
"""
Give me back a single expression instead of a list
"""
flat_exprs = self.flatten_expr(expr)
assert len(flat_exprs) == 1
return flat_exprs[0]
def flatten_scalar_expr_list(self, exprs):
assert isinstance(exprs, (list, tuple)), "Expected list, got %s" % (exprs,)
return [self.flatten_scalar_expr(e) for e in exprs]
def flatten_Const(self, expr):
if isinstance(expr.type, NoneT):
return ()
return (expr,)
def flatten_fn(self, closure):
fn = self.get_fn(closure)
import pipeline
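    # local import, presumably to avoid a circular dependency at module load time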
fn = pipeline.indexify.apply(fn)
flat_fn = build_flat_fn(fn)
flat_closure_args = self.flatten_expr(closure)
return flat_fn, flat_closure_args
def flatten_Call(self, expr):
flat_fn, flat_closure_args = self.flatten_fn(expr.fn)
flat_args = self.flatten_expr_list(expr.args)
args = tuple(flat_closure_args) + tuple(flat_args)
return Call(flat_fn, args, type = flat_fn.return_type)
def flatten_Cast(self, expr):
return [expr]
def flatten_UntypedFn(self, expr):
return []
def flatten_TypedFn(self, expr):
return []
def flatten_Var(self, expr):
if isinstance(expr.type, (ScalarT, PtrT)):
return (expr,)
elif isinstance(expr.type, (FnT, NoneT)):
return ()
else:
name = expr.name
assert name in self.var_expansions, "No fields known for %s : %s" % (expr, expr.type)
return self.var_expansions[name]
def flatten_Tuple(self, expr):
return self.flatten_expr_list(expr.elts)
def flatten_field(self, struct, field):
elts = self.flatten_expr(struct)
start, stop = field_pos_range(struct.type, field)
return elts[start:stop]
def flatten_TupleProj(self, expr):
result = self.flatten_field(expr.tuple, expr.index)
return result
def flatten_Closure(self, expr):
return self.flatten_expr_list(expr.args)
def flatten_ClosureElt(self, expr):
return self.flatten_field(expr.closure, expr.index)
def flatten_Attribute(self, expr):
return self.flatten_field(expr.value, expr.name)
def flatten_Select(self, expr):
cond = self.flatten_scalar_expr(expr.cond)
true_values = self.flatten_expr(expr.true_value)
false_values = self.flatten_expr(expr.false_value)
assert len(true_values) == len(false_values)
return [self.select(cond,t,f) for t,f in zip(true_values, false_values)]
def flatten_Alloc(self, expr):
count_exprs = self.flatten_expr(expr.count)
assert len(count_exprs) == 1
return Alloc(count = count_exprs[0], elt_type = expr.elt_type, type = expr.type)
def flatten_Array(self, expr):
assert False, "Array node should be an explicit allocation by now"
# or, if we flatten structured elts, maybe we should handle it here?
def flatten_Index(self, expr):
t = expr.value.type
if isinstance(t, PtrT):
return [expr]
    assert isinstance(t, ArrayT), "Expected Index to take an array, got %s" % (t,)
array_fields = self.flatten_expr(expr.value)
data_fields = get_field_elts(t, array_fields, 'data')
shape = get_field_elts(t, array_fields, 'shape')
strides = get_field_elts(t, array_fields, 'strides')
offset = get_field_elts(t, array_fields, 'offset')[0]
index = expr.index
if isinstance(index.type, (NoneT, SliceT, ScalarT)):
indices = [index]
elif isinstance(index, Tuple):
indices = index.elts
else:
      assert isinstance(index.type, TupleT), "Expected index to be a scalar, slice, or tuple"
indices = self.tuple_elts(index)
#indices = self.flatten_expr(expr.index)
n_indices = len(indices)
n_strides = len(strides)
assert n_indices == n_strides, \
"Not supported: indices vs. dimensions: %d != %d in %s" % (n_indices, n_strides, expr)
    # Fold each idx * stride into the flat offset. (Originally intended as a
    # fast path for all-scalar indexing; the guard below is disabled.)
    #if syntax.helpers.all_scalars(indices):
for i, idx in enumerate(indices):
offset = self.add(offset, self.mul(idx, strides[i]))
return [self.index(data_fields[0], offset)]
def flatten_PrimCall(self, expr):
args = self.flatten_scalar_expr_list(expr.args)
return [PrimCall(prim = expr.prim, args = args, type = expr.type)]
def flatten_Slice(self, expr):
return self.flatten_expr_list([expr.start, expr.stop, expr.step])
def flatten_Len(self, expr):
assert False, "Not implemented"
def flatten_ConstArray(self, expr):
assert False, "Not implemented"
def flatten_ConstArrayLike(self, expr):
assert False, "Not implemented"
def flatten_Range(self, expr):
assert False, "Not implemented"
def strides_from_shape_elts(self, shape_elts):
strides = [const_int(1)]
for dim in reversed(shape_elts[1:]):
strides = [self.mul(strides[0], dim)] + strides
return strides
def flatten_AllocArray(self, expr):
nelts = const_int(1)
shape_elts = self.flatten_expr(expr.shape)
for dim in shape_elts:
nelts = self.mul(nelts, dim)
ptr = Alloc(elt_type = expr.elt_type, count = nelts, type = ptr_type(expr.elt_type))
stride_elts = self.strides_from_shape_elts(shape_elts)
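    # Flattened array layout: (data ptr, shape dims..., stride dims..., offset, size).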
return (ptr,) + tuple(shape_elts) + tuple(stride_elts) + (self.int(0), nelts)
def flatten_ArrayView(self, expr):
data = self.flatten_expr(expr.data)
shape = self.flatten_expr(expr.shape)
strides = self.flatten_expr(expr.strides)
offset = self.flatten_expr(expr.offset)
size = self.flatten_expr(expr.size)
return data + shape + strides + offset + size
def flatten_Ravel(self, expr):
assert False, "Not implemented"
def flatten_Reshape(self, expr):
assert False, "Not implemented"
def flatten_Shape(self, expr):
assert False, "Not implemented"
def flatten_Strides(self, expr):
assert False, "Not implemented"
def flatten_Transpose(self, expr):
assert False, "Not implemented"
def flatten_Where(self, expr):
assert False, "Not implemented"
#######################
#
# Adverbs
#
#######################
def flatten_Map(self, expr):
assert False, "Unexpected Map encountered during flattening, should be IndexScan"
def flatten_Reduce(self, expr):
assert False, "Unexpected Reduce encountered during flattening, should be IndexScan"
def flatten_Scan(self, expr):
assert False, "Unexpected Scan encountered during flattening, should be IndexScan"
def flatten_IndexMap(self, expr):
#fn, closure_args = self.flatten_fn(expr.fn)
#shape_elts = self.flatten_expr(expr.shape)
#return IndexMap(fn = self.closure(fn, closure_args), shape = self.tuple(shape_elts), type = None)
assert False, "Unexpected IndexMap, should have been turned into ParFor before flattening"
def flatten_OuterMap(self, expr):
assert False, "Unexpected OuterMap, should have been turned into ParFor before flattening"
def flatten_IndexReduce(self, expr):
# assert isinstance(expr.type, ScalarT), "Non-scalar reductions not yet implemented"
fn, fn_args = self.flatten_fn(expr.fn)
fn = self.closure(fn, fn_args)
combine, combine_args = self.flatten_fn(expr.combine)
combine = self.closure(combine, combine_args)
shape = self.tuple(self.flatten_expr(expr.shape))
init = self.flatten_expr(expr.init)
t = flatten_type(expr.type)
if len(t) == 1:
init = init[0]
t = t[0]
else:
init = self.tuple(init)
t = make_tuple_type(t)
result = IndexReduce(fn = fn, combine = combine, shape = shape, type = t, init = init)
return [result]
def flatten_IndexScan(self, expr):
assert False, "IndexScan Not implemented"
def flatten_lhs_name(self, name, t):
if isinstance(t, (ScalarT, PtrT)):
return [Var(name = name, type = t)]
elif isinstance(t, (NoneT, FnT, TypeValueT)):
return []
elif isinstance(t, SliceT):
base = name.replace(".", "_")
start = Var(name = "%s_start" % base, type = t.start_type)
stop = Var(name = "%s_stop" % base, type = t.stop_type)
step = Var(name = "%s_step" % base, type = t.step_type)
field_vars = [start, stop, step]
elif isinstance(t, ClosureT):
base = name.replace(".", "_")
field_vars = [Var(name = "%s_closure_elt%d" % (base,i) , type = t)
for i,t in enumerate(t.arg_types)]
elif isinstance(t, TupleT):
base = name.replace(".", "_")
field_vars = [Var(name = "%s_elt%d" % (base, i), type = t)
for i,t in enumerate(t.elt_types)]
elif isinstance(t, ArrayT):
base = name.replace(".", "_")
data = Var(name = "%s_data" % base, type = t.ptr_t)
shape = Var(name = "%s_shape" % base, type = t.shape_t)
strides = Var(name = "%s_strides" % base, type = t.strides_t)
offset = Var(name = "%s_offset" % base, type = Int64)
nelts = Var(name = "%s_nelts" % base, type = Int64)
field_vars = [data, shape, strides, offset, nelts]
else:
assert False, "Unsupport type %s" % (t,)
return self.flatten_lhs_vars(field_vars)
def flatten_lhs_var(self, old_var):
t = old_var.type
if isinstance(t, (PtrT, ScalarT)):
return [old_var]
name = old_var.name
return self.flatten_lhs_name(name, t)
def flatten_lhs_vars(self, old_vars):
return concat_map(self.flatten_lhs_var, old_vars)
def flatten_scalar_lhs_var(self, old_var):
lhs_vars = self.flatten_lhs_var(old_var)
assert len(lhs_vars) == 1
return lhs_vars[0]
def build_flat_fn(old_fn, _cache = {}):
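  # The mutable default argument is deliberate: it serves as a process-wide
  # memo cache keyed by the function's cache_key.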
key = old_fn.cache_key
if key in _cache:
return _cache[key]
flat_fn = BuildFlatFn(old_fn).run()
_cache[key] = flat_fn
_cache[flat_fn.cache_key] = flat_fn
return flat_fn
class Flatten(Transform):
def unbox_var(self, var):
t = var.type
if isinstance(t, (FnT, NoneT, TypeValueT)):
return []
elif isinstance(t, (PtrT, ScalarT)):
return [var]
elif isinstance(t, ArrayT):
# for structured arrays, should this be a tuple?
base = var.name.replace(".", "_")
data = self.attr(var, 'data', name = base + "_data")
shape = self.attr(var, 'shape', name = base + "_shape")
strides = self.attr(var, 'strides', name = base + "_strides")
offset = self.attr(var, 'offset', name = base + "_offset")
size = self.attr(var, 'size', name = base + "_size")
return self.unbox_vars([data, shape, strides, offset, size])
elif isinstance(t, SliceT):
start = self.attr(var, 'start')
stop = self.attr(var, 'stop')
step = self.attr(var, 'step')
return self.unbox_vars([start, stop, step])
elif isinstance(t, ClosureT):
base = var.name.replace(".", "_")
closure_elts = [self.closure_elt(var, i, name = base + "_closure_elt%d" % i)
for i in xrange(len(t.arg_types))]
return self.unbox_vars(closure_elts)
elif isinstance(t, TupleT):
base = var.name.replace(".", "_")
tuple_elts = [self.assign_name(self.tuple_proj(var, i), name = base + "_elt%d" % i)
for i in xrange(len(t.elt_types))]
return self.unbox_vars(tuple_elts)
else:
assert False, "Unsupported type %s" % (t,)
def unbox_vars(self, exprs):
return concat_map(self.unbox_var, exprs)
def to_seq(self, expr):
if isinstance(expr.type, TupleT):
return self.tuple_elts(expr)
elif isinstance(expr.type, (FnT, NoneT)):
return []
else:
return [expr]
def box(self, t, elts):
if isinstance(t, NoneT):
assert len(elts) == 0, "Expected 0 values for None, got %s" % (elts,)
return none
elif isinstance(t, ScalarT):
assert len(elts) == 1
return elts[0]
elif isinstance(t, SliceT):
assert len(elts) == 3
start, stop, step = elts
return self.slice_value(start, stop, step)
elif isinstance(t, ArrayT):
data = get_field_elts(t, elts, 'data')[0]
shape = self.tuple(get_field_elts(t, elts, 'shape'))
strides = self.tuple(get_field_elts(t, elts, 'strides'))
offset = get_field_elts(t, elts, 'offset')[0]
nelts = get_field_elts(t, elts, 'size')[0]
return self.array_view(data, shape, strides, offset, nelts)
elif isinstance(t, TupleT):
boxed_elts = []
for i, elt_t in enumerate(t.elt_types):
elt = self.box(elt_t, get_field_elts(t, elts, i))
boxed_elts.append(elt)
return self.tuple(boxed_elts)
elif isinstance(t, ClosureT):
assert False, "Not implemented: ClosureT"
elif isinstance(t, FnT):
assert False, "Not implemented: FnT"
def transform_block(self, stmts):
return stmts
def pre_apply(self, old_fn, _cache = {}):
key = old_fn.cache_key
if key in _cache:
return _cache[key]
flat_fn = build_flat_fn(old_fn)
flat_fn.created_by = old_fn.created_by
flat_fn.transform_history = old_fn.transform_history.copy()
input_vars = mk_vars(old_fn.arg_names, old_fn.input_types)
self.blocks.push()
unboxed_inputs = self.unbox_vars(input_vars)
assert len(unboxed_inputs) == len(flat_fn.input_types)
unboxed_result = self.call(flat_fn, unboxed_inputs, name = "unboxed_result")
unboxed_elts = self.to_seq(unboxed_result)
boxed_result = self.box(old_fn.return_type, unboxed_elts)
self.return_(boxed_result)
old_fn.body = self.blocks.pop()
_cache[key] = old_fn
_cache[flat_fn.cache_key] = flat_fn
return old_fn
|
|
# -*- coding: utf-8 -*-
"""
File related views, including view_file, view_history_file, view_trash_file,
view_snapshot_file, view_shared_file, file_edit, etc.
"""
import os
import hashlib
import json
import stat
import urllib2
import chardet
import logging
import posixpath
import re
from django.core.cache import cache
from django.contrib.sites.models import RequestSite
from django.contrib import messages
from django.contrib.auth.hashers import check_password
from django.core.urlresolvers import reverse
from django.db.models import F
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.http import urlquote
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django.template.defaultfilters import filesizeformat
from django.views.decorators.csrf import csrf_exempt
from seaserv import seafile_api
from seaserv import get_repo, send_message, \
get_commits, check_permission, get_shared_groups_by_repo,\
is_group_user, get_file_id_by_path, get_commit, get_file_size, \
get_org_groups_by_repo, seafserv_rpc, seafserv_threaded_rpc
from pysearpc import SearpcError
from seahub.avatar.templatetags.avatar_tags import avatar
from seahub.avatar.templatetags.group_avatar_tags import grp_avatar
from seahub.auth.decorators import login_required
from seahub.base.decorators import repo_passwd_set_required
from seahub.contacts.models import Contact
from seahub.share.models import FileShare, PrivateFileDirShare, \
check_share_link_access, set_share_link_access
from seahub.share.forms import SharedLinkPasswordForm
from seahub.wiki.utils import get_wiki_dirent
from seahub.wiki.models import WikiDoesNotExist, WikiPageMissing
from seahub.utils import show_delete_days, render_error, is_org_context, \
get_file_type_and_ext, gen_file_get_url, gen_file_share_link, \
render_permission_error, \
is_textual_file, mkstemp, EMPTY_SHA1, HtmlDiff, \
check_filename_with_rename, gen_inner_file_get_url, normalize_file_path, \
user_traffic_over_limit, do_md5
from seahub.utils.ip import get_remote_ip
from seahub.utils.file_types import (IMAGE, PDF, DOCUMENT, SPREADSHEET, AUDIO,
MARKDOWN, TEXT, SF, OPENDOCUMENT, VIDEO)
from seahub.utils.star import is_file_starred
from seahub.utils import HAS_OFFICE_CONVERTER, FILEEXT_TYPE_MAP
from seahub.views import check_folder_permission
if HAS_OFFICE_CONVERTER:
from seahub.utils import (
query_office_convert_status, query_office_file_pages, add_office_convert_task,
prepare_converted_html, OFFICE_PREVIEW_MAX_SIZE, get_office_converted_page
)
import seahub.settings as settings
from seahub.settings import FILE_ENCODING_LIST, FILE_PREVIEW_MAX_SIZE, \
FILE_ENCODING_TRY_LIST, USE_PDFJS, MEDIA_URL, SITE_ROOT
from seahub.views import is_registered_user, check_repo_access_permission, \
get_unencry_rw_repos_by_user, get_file_access_permission
# Get an instance of a logger
logger = logging.getLogger(__name__)
def gen_path_link(path, repo_name):
"""
    Generate navigation paths and links for the repo page.
"""
if path and path[-1] != '/':
path += '/'
paths = []
links = []
if path and path != '/':
paths = path[1:-1].split('/')
i = 1
for name in paths:
link = '/' + '/'.join(paths[:i])
i = i + 1
links.append(link)
if repo_name:
paths.insert(0, repo_name)
links.insert(0, '/')
zipped = zip(paths, links)
return zipped
def get_file_content(file_type, raw_path, file_enc):
"""Get textual file content, including txt/markdown/seaf.
"""
return repo_file_get(raw_path, file_enc) if is_textual_file(
file_type=file_type) else ('', '', '')
def repo_file_get(raw_path, file_enc):
"""
Get file content and encoding.
"""
err = ''
file_content = ''
encoding = None
if file_enc != 'auto':
encoding = file_enc
try:
file_response = urllib2.urlopen(raw_path)
content = file_response.read()
    except urllib2.HTTPError as e:
logger.error(e)
err = _(u'HTTPError: failed to open file online')
return err, '', None
except urllib2.URLError as e:
logger.error(e)
err = _(u'URLError: failed to open file online')
return err, '', None
else:
if encoding:
try:
u_content = content.decode(encoding)
except UnicodeDecodeError:
err = _(u'The encoding you chose is not proper.')
return err, '', encoding
else:
for enc in FILE_ENCODING_TRY_LIST:
try:
u_content = content.decode(enc)
encoding = enc
break
except UnicodeDecodeError:
if enc != FILE_ENCODING_TRY_LIST[-1]:
continue
else:
encoding = chardet.detect(content)['encoding']
if encoding:
try:
u_content = content.decode(encoding)
except UnicodeDecodeError:
err = _(u'Unknown file encoding')
return err, '', ''
else:
err = _(u'Unknown file encoding')
return err, '', ''
file_content = u_content
return err, file_content, encoding
def get_file_view_path_and_perm(request, repo_id, obj_id, path, use_onetime=True):
""" Get path and the permission to view file.
Returns:
outer fileserver file url, inner fileserver file url, permission
"""
username = request.user.username
filename = os.path.basename(path)
# user_perm = get_file_access_permission(repo_id, path, username) or \
# get_repo_access_permission(repo_id, username)
user_perm = check_repo_access_permission(repo_id, request.user)
if user_perm is None:
return ('', '', user_perm)
else:
# Get a token to visit file
token = seafile_api.get_fileserver_access_token(repo_id, obj_id, 'view',
username, use_onetime=use_onetime)
outer_url = gen_file_get_url(token, filename)
inner_url = gen_inner_file_get_url(token, filename)
return (outer_url, inner_url, user_perm)
def handle_textual_file(request, filetype, raw_path, ret_dict):
    # the encoding option the user chose
file_enc = request.GET.get('file_enc', 'auto')
    if file_enc not in FILE_ENCODING_LIST:
file_enc = 'auto'
err, file_content, encoding = get_file_content(filetype,
raw_path, file_enc)
file_encoding_list = FILE_ENCODING_LIST
if encoding and encoding not in FILE_ENCODING_LIST:
file_encoding_list.append(encoding)
# populate return value dict
ret_dict['err'] = err
ret_dict['file_content'] = file_content
ret_dict['encoding'] = encoding
ret_dict['file_enc'] = file_enc
ret_dict['file_encoding_list'] = file_encoding_list
def handle_document(raw_path, obj_id, fileext, ret_dict):
if HAS_OFFICE_CONVERTER:
err, html_exists = prepare_converted_html(raw_path, obj_id, fileext, ret_dict)
# populate return value dict
ret_dict['err'] = err
ret_dict['html_exists'] = html_exists
else:
ret_dict['filetype'] = 'Unknown'
def handle_spreadsheet(raw_path, obj_id, fileext, ret_dict):
handle_document(raw_path, obj_id, fileext, ret_dict)
def handle_pdf(raw_path, obj_id, fileext, ret_dict):
if USE_PDFJS:
# use pdfjs to preview PDF
pass
elif HAS_OFFICE_CONVERTER:
        # use flash to preview PDF
err, html_exists = prepare_converted_html(raw_path, obj_id, fileext, ret_dict)
# populate return value dict
ret_dict['err'] = err
ret_dict['html_exists'] = html_exists
else:
# can't preview PDF
ret_dict['filetype'] = 'Unknown'
def convert_md_link(file_content, repo_id, username):
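    # Wiki-style links look like [[name]] or [[alias|name]]; spans in
    # backquotes are matched too and returned unchanged (see the regex below).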
def repl(matchobj):
if matchobj.group(2): # return origin string in backquotes
return matchobj.group(2)
link_alias = link_name = matchobj.group(1).strip()
if len(link_name.split('|')) > 1:
link_alias = link_name.split('|')[0]
link_name = link_name.split('|')[1]
filetype, fileext = get_file_type_and_ext(link_name)
if fileext == '':
            # a link_name without an extension is treated as a wiki (markdown) page
try:
dirent = get_wiki_dirent(repo_id, link_name)
path = "/" + dirent.obj_name
href = reverse('view_lib_file', args=[repo_id, urlquote(path)])
a_tag = '''<a href="%s">%s</a>'''
return a_tag % (href, link_alias)
except (WikiDoesNotExist, WikiPageMissing):
a_tag = '''<p class="wiki-page-missing">%s</p>'''
return a_tag % (link_alias)
elif filetype == IMAGE:
# load image to current page
path = "/" + link_name
filename = os.path.basename(path)
obj_id = get_file_id_by_path(repo_id, path)
if not obj_id:
return '''<p class="wiki-page-missing">%s</p>''' % link_name
token = seafile_api.get_fileserver_access_token(repo_id, obj_id,
'view', username)
return '<img class="wiki-image" src="%s" alt="%s" />' % (gen_file_get_url(token, filename), filename)
else:
from seahub.base.templatetags.seahub_tags import file_icon_filter
            # convert other types of file links to clickable links
path = "/" + link_name
icon = file_icon_filter(link_name)
s = reverse('view_lib_file', args=[repo_id, urlquote(path)])
a_tag = '''<img src="%simg/file/%s" alt="%s" class="vam" /> <a href="%s" target="_blank" class="vam">%s</a>'''
return a_tag % (MEDIA_URL, icon, icon, s, link_name)
return re.sub(r'\[\[(.+?)\]\]|(`.+?`)', repl, file_content)
def file_size_exceeds_preview_limit(file_size, file_type):
"""Check whether file size exceeds the preview limit base on different
type of file.
"""
if file_type in (DOCUMENT, PDF) and HAS_OFFICE_CONVERTER:
if file_size > OFFICE_PREVIEW_MAX_SIZE:
err = _(u'File size surpasses %s, can not be opened online.') % \
filesizeformat(OFFICE_PREVIEW_MAX_SIZE)
return True, err
else:
return False, ''
else:
if file_size > FILE_PREVIEW_MAX_SIZE:
err = _(u'File size surpasses %s, can not be opened online.') % \
filesizeformat(FILE_PREVIEW_MAX_SIZE)
return True, err
else:
return False, ''
def can_preview_file(file_name, file_size):
"""Check whether a file can be viewed online.
    Returns (True, None) if the file can be viewed online, otherwise
    (False, error_msg).
"""
file_type, file_ext = get_file_type_and_ext(file_name)
if file_ext in FILEEXT_TYPE_MAP: # check file extension
exceeds_limit, err_msg = file_size_exceeds_preview_limit(file_size,
file_type)
if exceeds_limit:
return (False, err_msg)
else:
return (True, None)
else:
# TODO: may need a better way instead of return string, and compare
# that string in templates
return (False, "invalid extension")
@login_required
@repo_passwd_set_required
def view_repo_file(request, repo_id):
"""
    Old 'file view' that passes the path as a query parameter.
"""
path = request.GET.get('p', '/').rstrip('/')
return _file_view(request, repo_id, path)
@login_required
@repo_passwd_set_required
def view_lib_file(request, repo_id, path):
"""
    New 'file view' that takes the path from the URL instead of a query parameter.
"""
return _file_view(request, repo_id, path)
def _file_view(request, repo_id, path):
"""
Steps to view file:
1. Get repo id and file path.
2. Check user's permission.
3. Check whether this file can be viewed online.
    4.1 Get file content if the file is a text file.
    4.2 Prepare flash if the file is a document.
    4.3 Prepare or use pdfjs if the file is a PDF.
    4.4 Otherwise, return its raw path.
"""
username = request.user.username
# check arguments
repo = get_repo(repo_id)
if not repo:
raise Http404
obj_id = get_file_id_by_path(repo_id, path)
if not obj_id:
return render_error(request, _(u'File does not exist'))
    # construct some variables
u_filename = os.path.basename(path)
current_commit = get_commits(repo_id, 0, 1)[0]
# get file type and extension
filetype, fileext = get_file_type_and_ext(u_filename)
    # Check whether the user has permission to view the file and get the raw
    # file path; render an error page if permission is denied.
    if filetype in (VIDEO, AUDIO):
raw_path, inner_path, user_perm = get_file_view_path_and_perm(
request, repo_id, obj_id, path, use_onetime=False)
else:
raw_path, inner_path, user_perm = get_file_view_path_and_perm(
request, repo_id, obj_id, path)
if not user_perm:
return render_permission_error(request, _(u'Unable to view file'))
# check if the user is the owner or not, for 'private share'
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
        is_repo_owner = repo_owner == username
else:
is_repo_owner = seafile_api.is_repo_owner(username, repo.id)
img_prev = None
img_next = None
ret_dict = {'err': '', 'file_content': '', 'encoding': '', 'file_enc': '',
'file_encoding_list': [], 'html_exists': False,
'filetype': filetype}
fsize = get_file_size(repo.store_id, repo.version, obj_id)
can_preview, err_msg = can_preview_file(u_filename, fsize)
if can_preview:
"""Choose different approach when dealing with different type of file."""
if is_textual_file(file_type=filetype):
handle_textual_file(request, filetype, inner_path, ret_dict)
if filetype == MARKDOWN:
c = ret_dict['file_content']
ret_dict['file_content'] = convert_md_link(c, repo_id, username)
elif filetype == DOCUMENT:
handle_document(inner_path, obj_id, fileext, ret_dict)
elif filetype == SPREADSHEET:
handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
elif filetype == OPENDOCUMENT:
if fsize == 0:
ret_dict['err'] = _(u'Invalid file format.')
elif filetype == PDF:
handle_pdf(inner_path, obj_id, fileext, ret_dict)
elif filetype == IMAGE:
parent_dir = os.path.dirname(path)
dirs = seafile_api.list_dir_by_commit_and_path(current_commit.repo_id,
current_commit.id, parent_dir)
if not dirs:
raise Http404
img_list = []
for dirent in dirs:
if not stat.S_ISDIR(dirent.props.mode):
fltype, flext = get_file_type_and_ext(dirent.obj_name)
if fltype == 'Image':
img_list.append(dirent.obj_name)
if len(img_list) > 1:
                img_list.sort(key=lambda x: x.lower())
cur_img_index = img_list.index(u_filename)
if cur_img_index != 0:
img_prev = posixpath.join(parent_dir, img_list[cur_img_index - 1])
if cur_img_index != len(img_list) - 1:
img_next = posixpath.join(parent_dir, img_list[cur_img_index + 1])
template = 'view_file_%s.html' % ret_dict['filetype'].lower()
else:
ret_dict['err'] = err_msg
template = 'view_file_base.html'
# generate file path navigator
zipped = gen_path_link(path, repo.name)
# file shared link
l = FileShare.objects.filter(repo_id=repo_id).filter(
username=username).filter(path=path)
fileshare = l[0] if len(l) > 0 else None
    http_or_https = 'https' if request.is_secure() else 'http'
domain = RequestSite(request).domain
if fileshare:
file_shared_link = gen_file_share_link(fileshare.token)
else:
file_shared_link = ''
for g in request.user.joined_groups:
g.avatar = grp_avatar(g.id, 20)
file_path_hash = hashlib.md5(urllib2.quote(path.encode('utf-8'))).hexdigest()[:12]
# fetch file contributors and latest contributor
try:
# get real path for sub repo
real_path = repo.origin_path + path if repo.origin_path else path
dirent = seafile_api.get_dirent_by_path(repo.store_id, real_path)
latest_contributor, last_modified = dirent.modifier, dirent.mtime
except SearpcError as e:
logger.error(e)
latest_contributor, last_modified = None, 0
# check whether file is starred
is_starred = False
org_id = -1
if request.user.org:
org_id = request.user.org.org_id
is_starred = is_file_starred(username, repo.id, path.encode('utf-8'), org_id)
office_preview_token = ret_dict.get('office_preview_token', '')
return render_to_response(template, {
'repo': repo,
'is_repo_owner': is_repo_owner,
'obj_id': obj_id,
'filename': u_filename,
'path': path,
'zipped': zipped,
'current_commit': current_commit,
'fileext': fileext,
'raw_path': raw_path,
'fileshare': fileshare,
'protocol': http_or_https,
'domain': domain,
'file_shared_link': file_shared_link,
'err': ret_dict['err'],
'file_content': ret_dict['file_content'],
'file_enc': ret_dict['file_enc'],
'encoding': ret_dict['encoding'],
'file_encoding_list': ret_dict['file_encoding_list'],
'html_exists': ret_dict['html_exists'],
'html_detail': ret_dict.get('html_detail', {}),
'filetype': ret_dict['filetype'],
'use_pdfjs': USE_PDFJS,
'latest_contributor': latest_contributor,
'last_modified': last_modified,
'last_commit_id': repo.head_cmmt_id,
'is_starred': is_starred,
'user_perm': user_perm,
'img_prev': img_prev,
'img_next': img_next,
'highlight_keyword': settings.HIGHLIGHT_KEYWORD,
'office_preview_token': office_preview_token,
}, context_instance=RequestContext(request))
def view_history_file_common(request, repo_id, ret_dict):
# check arguments
repo = get_repo(repo_id)
if not repo:
raise Http404
path = request.GET.get('p', '/')
commit_id = request.GET.get('commit_id', '')
if not commit_id:
raise Http404
obj_id = request.GET.get('obj_id', '')
if not obj_id:
raise Http404
    # construct some variables
u_filename = os.path.basename(path)
current_commit = get_commit(repo.id, repo.version, commit_id)
if not current_commit:
raise Http404
# get file type and extension
filetype, fileext = get_file_type_and_ext(u_filename)
    # Check whether the user has permission to view the file and get the raw
    # file path; render an error page if permission is denied.
    if filetype in (VIDEO, AUDIO):
raw_path, inner_path, user_perm = get_file_view_path_and_perm(
request, repo_id, obj_id, path, use_onetime=False)
else:
raw_path, inner_path, user_perm = get_file_view_path_and_perm(
request, repo_id, obj_id, path)
request.user_perm = user_perm
if user_perm:
# Check file size
fsize = get_file_size(repo.store_id, repo.version, obj_id)
if fsize > FILE_PREVIEW_MAX_SIZE:
err = _(u'File size surpasses %s, can not be opened online.') % \
filesizeformat(FILE_PREVIEW_MAX_SIZE)
ret_dict['err'] = err
elif filetype in (DOCUMENT, PDF) and HAS_OFFICE_CONVERTER and fsize > OFFICE_PREVIEW_MAX_SIZE:
err = _(u'File size surpasses %s, can not be opened online.') % \
filesizeformat(OFFICE_PREVIEW_MAX_SIZE)
ret_dict['err'] = err
else:
"""Choose different approach when dealing with different type of file."""
if is_textual_file(file_type=filetype):
handle_textual_file(request, filetype, inner_path, ret_dict)
elif filetype == DOCUMENT:
handle_document(inner_path, obj_id, fileext, ret_dict)
elif filetype == SPREADSHEET:
handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
elif filetype == OPENDOCUMENT:
if fsize == 0:
ret_dict['err'] = _(u'Invalid file format.')
elif filetype == PDF:
handle_pdf(inner_path, obj_id, fileext, ret_dict)
else:
pass
# populate return value dict
ret_dict['repo'] = repo
ret_dict['obj_id'] = obj_id
ret_dict['file_name'] = u_filename
ret_dict['path'] = path
ret_dict['current_commit'] = current_commit
ret_dict['fileext'] = fileext
ret_dict['raw_path'] = raw_path
    if 'filetype' not in ret_dict:
ret_dict['filetype'] = filetype
ret_dict['use_pdfjs'] = USE_PDFJS
@repo_passwd_set_required
def view_history_file(request, repo_id):
ret_dict = {}
view_history_file_common(request, repo_id, ret_dict)
if not request.user_perm:
return render_permission_error(request, _(u'Unable to view file'))
# generate file path navigator
path = ret_dict['path']
repo = ret_dict['repo']
ret_dict['zipped'] = gen_path_link(path, repo.name)
return render_to_response('view_history_file.html', ret_dict,
context_instance=RequestContext(request))
@repo_passwd_set_required
def view_trash_file(request, repo_id):
ret_dict = {}
view_history_file_common(request, repo_id, ret_dict)
if not request.user_perm:
return render_permission_error(request, _(u'Unable to view file'))
basedir = request.GET.get('base', '')
if not basedir:
raise Http404
days = show_delete_days(request)
ret_dict['basedir'] = basedir
ret_dict['days'] = days
# generate file path navigator
path = ret_dict['path']
repo = ret_dict['repo']
ret_dict['zipped'] = gen_path_link(path, repo.name)
return render_to_response('view_trash_file.html', ret_dict,
context_instance=RequestContext(request), )
@repo_passwd_set_required
def view_snapshot_file(request, repo_id):
ret_dict = {}
view_history_file_common(request, repo_id, ret_dict)
if not request.user_perm:
return render_permission_error(request, _(u'Unable to view file'))
# generate file path navigator
path = ret_dict['path']
repo = ret_dict['repo']
ret_dict['zipped'] = gen_path_link(path, repo.name)
return render_to_response('view_snapshot_file.html', ret_dict,
context_instance=RequestContext(request), )
def _download_file_from_share_link(request, fileshare):
"""Download shared file or private shared file.
    For dir share links, the file path must be provided by the frontend via
    the `p` query parameter; for file share links, `fileshare.path` is used.
"""
next = request.META.get('HTTP_REFERER', settings.SITE_ROOT)
username = request.user.username
if isinstance(fileshare, PrivateFileDirShare):
fileshare.username = fileshare.from_user
shared_by = fileshare.username
repo = get_repo(fileshare.repo_id)
if not repo:
raise Http404
    # Construct the real file path when downloading a file inside a shared
    # dir; otherwise just use the path stored in the DB.
if isinstance(fileshare, FileShare) and fileshare.is_dir_share_link():
req_path = request.GET.get('p', '')
if not req_path:
messages.error(request, _(u'Unable to download file, invalid file path'))
return HttpResponseRedirect(next)
real_path = posixpath.join(fileshare.path, req_path.lstrip('/'))
else:
real_path = fileshare.path
filename = os.path.basename(real_path)
obj_id = seafile_api.get_file_id_by_path(repo.id, real_path)
if not obj_id:
messages.error(request, _(u'Unable to download file, wrong file path'))
return HttpResponseRedirect(next)
# check whether owner's traffic over the limit
if user_traffic_over_limit(fileshare.username):
messages.error(request, _(u'Unable to download file, share link traffic is used up.'))
return HttpResponseRedirect(next)
send_file_download_msg(request, repo, real_path, 'share-link')
try:
file_size = seafile_api.get_file_size(repo.store_id, repo.version,
obj_id)
send_message('seahub.stats', 'file-download\t%s\t%s\t%s\t%s' %
(repo.id, shared_by, obj_id, file_size))
except Exception as e:
logger.error('Error when sending file-download message: %s' % str(e))
dl_token = seafile_api.get_fileserver_access_token(repo.id, obj_id,
'download', username)
return HttpResponseRedirect(gen_file_get_url(dl_token, filename))
def view_shared_file(request, token):
"""
View file via shared link.
    Download the shared file if `dl` is in the request params.
    View the raw shared file if `raw` is in the request params.
"""
assert token is not None # Checked by URLconf
fileshare = FileShare.objects.get_valid_file_link_by_token(token)
if fileshare is None:
raise Http404
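    # Encrypted links require the share-link password before the file is
    # shown; successful validation is cached for authenticated users.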
if fileshare.is_encrypted():
if not check_share_link_access(request, token):
d = {'token': token, 'view_name': 'view_shared_file', }
if request.method == 'POST':
post_values = request.POST.copy()
post_values['enc_password'] = fileshare.password
form = SharedLinkPasswordForm(post_values)
d['form'] = form
if form.is_valid():
# set cache for non-anonymous user
if request.user.is_authenticated():
set_share_link_access(request, token)
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
if request.GET.get('dl', '') == '1':
# download shared file
return _download_file_from_share_link(request, fileshare)
shared_by = fileshare.username
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
raise Http404
path = fileshare.path.rstrip('/') # Normalize file path
obj_id = seafile_api.get_file_id_by_path(repo_id, path)
if not obj_id:
return render_error(request, _(u'File does not exist'))
file_size = seafile_api.get_file_size(repo.store_id, repo.version, obj_id)
filename = os.path.basename(path)
filetype, fileext = get_file_type_and_ext(filename)
access_token = seafile_api.get_fileserver_access_token(repo.id, obj_id,
'view', '',
use_onetime=False)
raw_path = gen_file_get_url(access_token, filename)
if request.GET.get('raw', '') == '1':
        # view the raw shared file; whether it is shown inline or downloaded
        # depends on the browser
return HttpResponseRedirect(raw_path)
inner_path = gen_inner_file_get_url(access_token, filename)
# get file content
ret_dict = {'err': '', 'file_content': '', 'encoding': '', 'file_enc': '',
'file_encoding_list': [], 'html_exists': False,
'filetype': filetype}
exceeds_limit, err_msg = file_size_exceeds_preview_limit(file_size, filetype)
if exceeds_limit:
ret_dict['err'] = err_msg
else:
"""Choose different approach when dealing with different type of file."""
if is_textual_file(file_type=filetype):
handle_textual_file(request, filetype, inner_path, ret_dict)
elif filetype == DOCUMENT:
handle_document(inner_path, obj_id, fileext, ret_dict)
elif filetype == SPREADSHEET:
handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
elif filetype == OPENDOCUMENT:
if file_size == 0:
ret_dict['err'] = _(u'Invalid file format.')
elif filetype == PDF:
handle_pdf(inner_path, obj_id, fileext, ret_dict)
    # Increase the share link's view_cnt; this operation should be atomic.
fileshare.view_cnt = F('view_cnt') + 1
fileshare.save()
# send statistic messages
if ret_dict['filetype'] != 'Unknown':
try:
send_message('seahub.stats', 'file-view\t%s\t%s\t%s\t%s' % \
(repo.id, shared_by, obj_id, file_size))
        except SearpcError as e:
logger.error('Error when sending file-view message: %s' % str(e))
accessible_repos = get_unencry_rw_repos_by_user(request)
save_to_link = reverse('save_shared_link') + '?t=' + token
traffic_over_limit = user_traffic_over_limit(shared_by)
office_preview_token = ret_dict.get('office_preview_token', '')
return render_to_response('shared_file_view.html', {
'repo': repo,
'obj_id': obj_id,
'path': path,
'file_name': filename,
'file_size': file_size,
'shared_token': token,
'access_token': access_token,
'fileext': fileext,
'raw_path': raw_path,
'shared_by': shared_by,
'err': ret_dict['err'],
'file_content': ret_dict['file_content'],
'encoding': ret_dict['encoding'],
'file_encoding_list':ret_dict['file_encoding_list'],
'html_exists': ret_dict['html_exists'],
'html_detail': ret_dict.get('html_detail', {}),
'office_preview_token': office_preview_token,
'filetype': ret_dict['filetype'],
'use_pdfjs':USE_PDFJS,
'accessible_repos': accessible_repos,
'save_to_link': save_to_link,
'traffic_over_limit': traffic_over_limit,
}, context_instance=RequestContext(request))
def view_raw_shared_file(request, token, obj_id, file_name):
"""Returns raw content of a shared file.
Arguments:
- `request`:
- `token`:
- `obj_id`:
- `file_name`:
"""
fileshare = FileShare.objects.get_valid_file_link_by_token(token)
if fileshare is None:
raise Http404
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
raise Http404
# Normalize file path based on file or dir share link
if fileshare.is_file_share_link():
file_path = fileshare.path.rstrip('/')
else:
file_path = fileshare.path.rstrip('/') + '/' + file_name
real_obj_id = seafile_api.get_file_id_by_path(repo_id, file_path)
if not real_obj_id:
raise Http404
if real_obj_id != obj_id: # perm check
raise Http404
filename = os.path.basename(file_path)
username = request.user.username
token = seafile_api.get_fileserver_access_token(repo_id, real_obj_id, 'view',
username, use_onetime=False)
outer_url = gen_file_get_url(token, filename)
return HttpResponseRedirect(outer_url)
def view_file_via_shared_dir(request, token):
assert token is not None # Checked by URLconf
fileshare = FileShare.objects.get_valid_file_link_by_token(token)
if fileshare is None:
raise Http404
if request.GET.get('dl', '') == '1':
# download shared file
return _download_file_from_share_link(request, fileshare)
shared_by = fileshare.username
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
raise Http404
    # Get the requested file path from the frontend and join it with
    # fileshare.path to get the real path used to fetch file content via RPC.
req_path = request.GET.get('p', '').rstrip('/')
if not req_path:
raise Http404
real_path = posixpath.join(fileshare.path, req_path.lstrip('/'))
# generate dir navigator
if fileshare.path == '/':
zipped = gen_path_link(req_path, repo.name)
else:
zipped = gen_path_link(req_path, os.path.basename(fileshare.path[:-1]))
obj_id = seafile_api.get_file_id_by_path(repo_id, real_path)
if not obj_id:
return render_error(request, _(u'File does not exist'))
file_size = seafile_api.get_file_size(repo.store_id, repo.version, obj_id)
filename = os.path.basename(req_path)
filetype, fileext = get_file_type_and_ext(filename)
access_token = seafile_api.get_fileserver_access_token(repo.id, obj_id,
'view', '', use_onetime=False)
raw_path = gen_file_get_url(access_token, filename)
inner_path = gen_inner_file_get_url(access_token, filename)
img_prev = None
img_next = None
# get file content
ret_dict = {'err': '', 'file_content': '', 'encoding': '', 'file_enc': '',
'file_encoding_list': [], 'html_exists': False,
'filetype': filetype}
exceeds_limit, err_msg = file_size_exceeds_preview_limit(file_size, filetype)
if exceeds_limit:
ret_dict['err'] = err_msg
else:
"""Choose different approach when dealing with different type of file."""
if is_textual_file(file_type=filetype):
handle_textual_file(request, filetype, inner_path, ret_dict)
elif filetype == DOCUMENT:
handle_document(inner_path, obj_id, fileext, ret_dict)
elif filetype == SPREADSHEET:
handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
elif filetype == PDF:
handle_pdf(inner_path, obj_id, fileext, ret_dict)
elif filetype == IMAGE:
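            # For an image, list its sibling files so that previous/next
            # navigation links can be built.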
current_commit = get_commits(repo_id, 0, 1)[0]
real_parent_dir = os.path.dirname(real_path)
parent_dir = os.path.dirname(req_path)
dirs = seafile_api.list_dir_by_commit_and_path(current_commit.repo_id,
current_commit.id, real_parent_dir)
if not dirs:
raise Http404
img_list = []
for dirent in dirs:
if not stat.S_ISDIR(dirent.props.mode):
fltype, flext = get_file_type_and_ext(dirent.obj_name)
if fltype == 'Image':
img_list.append(dirent.obj_name)
if len(img_list) > 1:
                img_list.sort(key=lambda x: x.lower())
cur_img_index = img_list.index(filename)
if cur_img_index != 0:
img_prev = posixpath.join(parent_dir, img_list[cur_img_index - 1])
if cur_img_index != len(img_list) - 1:
img_next = posixpath.join(parent_dir, img_list[cur_img_index + 1])
# send statistic messages
if ret_dict['filetype'] != 'Unknown':
try:
send_message('seahub.stats', 'file-view\t%s\t%s\t%s\t%s' % \
(repo.id, shared_by, obj_id, file_size))
        except SearpcError as e:
logger.error('Error when sending file-view message: %s' % str(e))
traffic_over_limit = user_traffic_over_limit(shared_by)
office_preview_token = ret_dict.get('office_preview_token', '')
return render_to_response('shared_file_view.html', {
'repo': repo,
'obj_id': obj_id,
'from_shared_dir': True,
'path': req_path,
'file_name': filename,
'file_size': file_size,
'shared_token': token,
'access_token': access_token,
'fileext': fileext,
'raw_path': raw_path,
'shared_by': shared_by,
'token': token,
'err': ret_dict['err'],
'file_content': ret_dict['file_content'],
'encoding': ret_dict['encoding'],
'file_encoding_list':ret_dict['file_encoding_list'],
'html_exists': ret_dict['html_exists'],
'html_detail': ret_dict.get('html_detail', {}),
'office_preview_token': office_preview_token,
'filetype': ret_dict['filetype'],
'use_pdfjs':USE_PDFJS,
'zipped': zipped,
'img_prev': img_prev,
'img_next': img_next,
'traffic_over_limit': traffic_over_limit,
}, context_instance=RequestContext(request))
def file_edit_submit(request, repo_id):
content_type = 'application/json; charset=utf-8'
def error_json(error_msg=_(u'Internal Error'), op=None):
return HttpResponse(json.dumps({'error': error_msg, 'op': op}),
status=400,
content_type=content_type)
username = request.user.username
if check_repo_access_permission(repo_id, request.user) != 'rw':
return error_json(_(u'Permission denied'))
repo = get_repo(repo_id)
if not repo:
return error_json(_(u'The library does not exist.'))
if repo.encrypted:
repo.password_set = seafile_api.is_password_set(repo_id, username)
if not repo.password_set:
return error_json(_(u'The library is encrypted.'), 'decrypt')
content = request.POST.get('content')
encoding = request.POST.get('encoding')
path = request.GET.get('p')
if content is None or not path or encoding not in ["gbk", "utf-8"]:
return error_json(_(u'Invalid arguments'))
head_id = request.GET.get('head', None)
content = content.encode(encoding)
# first dump the file content to a tmp file, then update the file
fd, tmpfile = mkstemp()
def remove_tmp_file():
try:
os.remove(tmpfile)
        except OSError:
            pass
try:
bytesWritten = os.write(fd, content)
    except OSError:
        bytesWritten = -1
finally:
os.close(fd)
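    # A short write is treated as a failure as well.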
if bytesWritten != len(content):
remove_tmp_file()
return error_json()
req_from = request.GET.get('from', '')
if req_from == 'wiki_page_edit' or req_from == 'wiki_page_new':
try:
gid = int(request.GET.get('gid', 0))
except ValueError:
gid = 0
wiki_name = os.path.splitext(os.path.basename(path))[0]
next = reverse('group_wiki', args=[gid, wiki_name])
elif req_from == 'personal_wiki_page_edit' or req_from == 'personal_wiki_page_new':
wiki_name = os.path.splitext(os.path.basename(path))[0]
next = reverse('personal_wiki', args=[wiki_name])
else:
next = reverse('view_lib_file', args=[repo_id, urlquote(path)])
parent_dir = os.path.dirname(path).encode('utf-8')
filename = os.path.basename(path).encode('utf-8')
try:
seafserv_threaded_rpc.put_file(repo_id, tmpfile, parent_dir,
filename, username, head_id)
remove_tmp_file()
return HttpResponse(json.dumps({'href': next}),
content_type=content_type)
    except SearpcError as e:
remove_tmp_file()
return error_json(str(e))
@login_required
def file_edit(request, repo_id):
repo = get_repo(repo_id)
if not repo:
raise Http404
if request.method == 'POST':
return file_edit_submit(request, repo_id)
path = request.GET.get('p', '/')
if path[-1] == '/':
path = path[:-1]
u_filename = os.path.basename(path)
filename = urllib2.quote(u_filename.encode('utf-8'))
parent_dir = os.path.dirname(path)
if check_folder_permission(request, repo.id, parent_dir) != 'rw':
return render_permission_error(request, _(u'Unable to edit file'))
head_id = repo.head_cmmt_id
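    # Remember the current head commit so that concurrent updates can be
    # detected when the edited file is saved.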
obj_id = get_file_id_by_path(repo_id, path)
if not obj_id:
return render_error(request, _(u'The file does not exist.'))
token = seafile_api.get_fileserver_access_token(repo_id, obj_id, 'view',
request.user.username)
# generate path and link
zipped = gen_path_link(path, repo.name)
filetype, fileext = get_file_type_and_ext(filename)
op = None
err = ''
file_content = None
encoding = None
file_encoding_list = FILE_ENCODING_LIST
if filetype == TEXT or filetype == MARKDOWN or filetype == SF:
if repo.encrypted:
repo.password_set = seafile_api.is_password_set(repo_id, request.user.username)
if not repo.password_set:
op = 'decrypt'
if not op:
inner_path = gen_inner_file_get_url(token, filename)
file_enc = request.GET.get('file_enc', 'auto')
            if file_enc not in FILE_ENCODING_LIST:
file_enc = 'auto'
err, file_content, encoding = repo_file_get(inner_path, file_enc)
if encoding and encoding not in FILE_ENCODING_LIST:
file_encoding_list.append(encoding)
else:
err = _(u'Edit online is not offered for this type of file.')
    # Redirect to a different page, depending on where the user came from,
    # when the cancel button on the file edit page is clicked.
cancel_url = reverse('view_lib_file', args=[repo.id, urlquote(path)])
page_from = request.GET.get('from', '')
gid = request.GET.get('gid', '')
wiki_name = os.path.splitext(u_filename)[0]
if page_from == 'wiki_page_edit' or page_from == 'wiki_page_new':
cancel_url = reverse('group_wiki', args=[gid, wiki_name])
elif page_from == 'personal_wiki_page_edit' or page_from == 'personal_wiki_page_new':
cancel_url = reverse('personal_wiki', args=[wiki_name])
return render_to_response('file_edit.html', {
'repo':repo,
'u_filename':u_filename,
'wiki_name': wiki_name,
'path':path,
'zipped':zipped,
'filetype':filetype,
'fileext':fileext,
'op':op,
'err':err,
'file_content':file_content,
'encoding': encoding,
'file_encoding_list':file_encoding_list,
'head_id': head_id,
'from': page_from,
'gid': gid,
'cancel_url': cancel_url,
}, context_instance=RequestContext(request))
@login_required
def view_raw_file(request, repo_id, file_path):
"""Returns raw content of a file.
Arguments:
- `request`:
    - `repo_id`:
    - `file_path`:
    """
repo = get_repo(repo_id)
if not repo:
raise Http404
file_path = file_path.rstrip('/')
if file_path[0] != '/':
file_path = '/' + file_path
obj_id = get_file_id_by_path(repo_id, file_path)
if not obj_id:
raise Http404
raw_path, inner_path, user_perm = get_file_view_path_and_perm(
request, repo.id, obj_id, file_path)
if user_perm is None:
raise Http404
return HttpResponseRedirect(raw_path)
def send_file_download_msg(request, repo, path, dl_type):
"""Send file downlaod msg.
Arguments:
- `request`:
- `repo`:
- `obj_id`:
- `dl_type`: web or api
"""
username = request.user.username
ip = get_remote_ip(request)
user_agent = request.META.get("HTTP_USER_AGENT")
msg = 'file-download-%s\t%s\t%s\t%s\t%s\t%s' % \
(dl_type, username, ip, user_agent, repo.id, path)
msg_utf8 = msg.encode('utf-8')
try:
send_message('seahub.stats', msg_utf8)
except Exception as e:
logger.error("Error when sending file-download-%s message: %s" %
(dl_type, str(e)))
@login_required
def download_file(request, repo_id, obj_id):
"""Download file for file/history file preview.
Arguments:
- `request`:
- `repo_id`:
- `obj_id`:
"""
username = request.user.username
repo = get_repo(repo_id)
if not repo:
raise Http404
if repo.encrypted and not seafile_api.is_password_set(repo_id, username):
return HttpResponseRedirect(reverse('view_common_lib_dir', args=[repo_id, '']))
# Permission check and generate download link
path = request.GET.get('p', '')
if check_repo_access_permission(repo_id, request.user) or \
get_file_access_permission(repo_id, path, username):
# Get a token to access file
token = seafile_api.get_fileserver_access_token(repo_id, obj_id,
'download', username)
else:
messages.error(request, _(u'Unable to download file'))
next = request.META.get('HTTP_REFERER', settings.SITE_ROOT)
return HttpResponseRedirect(next)
# send stats message
send_file_download_msg(request, repo, path, 'web')
file_name = os.path.basename(path.rstrip('/'))
redirect_url = gen_file_get_url(token, file_name)
return HttpResponseRedirect(redirect_url)
########## text diff
def get_file_content_by_commit_and_path(request, repo_id, commit_id, path, file_enc):
try:
obj_id = seafserv_threaded_rpc.get_file_id_by_commit_and_path( \
repo_id, commit_id, path)
    except SearpcError:
        return None, 'bad path'
if not obj_id or obj_id == EMPTY_SHA1:
return '', None
else:
permission = check_repo_access_permission(repo_id, request.user)
if permission:
# Get a token to visit file
token = seafile_api.get_fileserver_access_token(repo_id, obj_id,
'view',
request.user.username)
else:
return None, 'permission denied'
filename = os.path.basename(path)
inner_path = gen_inner_file_get_url(token, filename)
try:
err, file_content, encoding = repo_file_get(inner_path, file_enc)
    except Exception as e:
return None, 'error when read file from fileserver: %s' % e
return file_content, err
@login_required
def text_diff(request, repo_id):
commit_id = request.GET.get('commit', '')
path = request.GET.get('p', '')
u_filename = os.path.basename(path)
file_enc = request.GET.get('file_enc', 'auto')
if not file_enc in FILE_ENCODING_LIST:
file_enc = 'auto'
if not (commit_id and path):
return render_error(request, 'bad params')
repo = get_repo(repo_id)
if not repo:
return render_error(request, 'bad repo')
current_commit = seafserv_threaded_rpc.get_commit(repo.id, repo.version, commit_id)
if not current_commit:
return render_error(request, 'bad commit id')
prev_commit = seafserv_threaded_rpc.get_commit(repo.id, repo.version, current_commit.parent_id)
if not prev_commit:
        return render_error(request, 'bad commit id')
path = path.encode('utf-8')
current_content, err = get_file_content_by_commit_and_path(request, \
repo_id, current_commit.id, path, file_enc)
if err:
return render_error(request, err)
prev_content, err = get_file_content_by_commit_and_path(request, \
repo_id, prev_commit.id, path, file_enc)
if err:
return render_error(request, err)
is_new_file = False
diff_result_table = ''
if prev_content == '' and current_content == '':
is_new_file = True
else:
diff = HtmlDiff()
diff_result_table = diff.make_table(prev_content.splitlines(),
current_content.splitlines(), True)
zipped = gen_path_link(path, repo.name)
return render_to_response('text_diff.html', {
'u_filename':u_filename,
'repo': repo,
'path': path,
'zipped': zipped,
'current_commit': current_commit,
'prev_commit': prev_commit,
'diff_result_table': diff_result_table,
'is_new_file': is_new_file,
}, context_instance=RequestContext(request))
########## office related
@require_POST
@csrf_exempt
def office_convert_add_task(request):
if not HAS_OFFICE_CONVERTER:
raise Http404
content_type = 'application/json; charset=utf-8'
try:
        sec_token = request.POST['sec_token']
        file_id = request.POST['file_id']
        doctype = request.POST['doctype']
        raw_path = request.POST['raw_path']
except KeyError:
return HttpResponseBadRequest('invalid params')
if sec_token != do_md5(settings.SECRET_KEY):
return HttpResponseForbidden()
if len(file_id) != 40:
return HttpResponseBadRequest('invalid params')
resp = add_office_convert_task(file_id, doctype, raw_path, internal=True)
return HttpResponse(json.dumps(resp), content_type=content_type)
def check_office_token(func):
    '''Set the `office_preview_token` attr on the request object for office
    preview related requests
    '''
def newfunc(request, *args, **kwargs):
token = request.META.get('HTTP_X_SEAFILE_OFFICE_PREVIEW_TOKEN', '')
if token and len(token) != 32:
return HttpResponseForbidden()
if not token:
token = request.GET.get('office_preview_token', '')
request.office_preview_token = token
return func(request, *args, **kwargs)
return newfunc
@check_office_token
def office_convert_query_status(request, internal=False):
if not HAS_OFFICE_CONVERTER:
raise Http404
if not internal and not request.is_ajax():
raise Http404
content_type = 'application/json; charset=utf-8'
ret = {'success': False}
file_id = request.GET.get('file_id', '')
if len(file_id) != 40:
ret['error'] = 'invalid param'
elif request.office_preview_token != do_md5(file_id + settings.SECRET_KEY):
return HttpResponseForbidden()
else:
try:
ret = query_office_convert_status(file_id, internal=internal)
        except Exception as e:
logging.exception('failed to call query_office_convert_status')
ret['error'] = str(e)
return HttpResponse(json.dumps(ret), content_type=content_type)
# valid static file paths include:
# file.css
# file.outline
# 1.page
# 2.page
# ...
_OFFICE_PAGE_PATTERN = re.compile(r'^([0-9a-f]{40})/([\d]+\.page|file\.css|file\.outline|index\.html)$')
@check_office_token
def office_convert_get_page(request, path, internal=False):
if not HAS_OFFICE_CONVERTER:
raise Http404
m = _OFFICE_PAGE_PATTERN.match(path)
if not m:
return HttpResponseForbidden()
file_id = m.group(1)
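    # Everything except file.css requires a matching office preview token.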
    if not path.endswith('file.css'):
        if request.office_preview_token != do_md5(file_id + settings.SECRET_KEY):
            return HttpResponseForbidden()
return get_office_converted_page(request, path, file_id, internal=internal)
@check_office_token
def office_convert_query_page_num(request, internal=False):
if not HAS_OFFICE_CONVERTER:
raise Http404
if not internal and not request.is_ajax():
raise Http404
content_type = 'application/json; charset=utf-8'
ret = {'success': False}
file_id = request.GET.get('file_id', '')
if len(file_id) != 40:
ret['error'] = 'invalid param'
elif request.office_preview_token != do_md5(file_id + settings.SECRET_KEY):
return HttpResponseForbidden()
else:
try:
ret = query_office_file_pages(file_id, internal=internal)
        except Exception as e:
logging.exception('failed to call query_office_file_pages')
ret['error'] = str(e)
return HttpResponse(json.dumps(ret), content_type=content_type)
###### private file/dir shares
@login_required
def view_priv_shared_file(request, token):
"""View private shared file.
"""
try:
pfs = PrivateFileDirShare.objects.get_priv_file_dir_share_by_token(token)
except PrivateFileDirShare.DoesNotExist:
raise Http404
repo_id = pfs.repo_id
repo = get_repo(repo_id)
if not repo:
raise Http404
username = request.user.username
if username != pfs.from_user and username != pfs.to_user:
raise Http404 # permission check
if request.GET.get('dl', '') == '1':
# download private shared file
return _download_file_from_share_link(request, pfs)
path = normalize_file_path(pfs.path)
obj_id = seafile_api.get_file_id_by_path(repo.id, path)
if not obj_id:
raise Http404
filename = os.path.basename(path)
filetype, fileext = get_file_type_and_ext(filename)
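    # Media files may be fetched repeatedly while playing (e.g. when
    # seeking), so do not use a one-time access token for them.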
if filetype == VIDEO or filetype == AUDIO:
access_token = seafile_api.get_fileserver_access_token(repo.id, obj_id,
'view', username,
use_onetime=False)
else:
access_token = seafile_api.get_fileserver_access_token(repo.id, obj_id,
'view', username)
raw_path = gen_file_get_url(access_token, filename)
inner_path = gen_inner_file_get_url(access_token, filename)
# get file content
ret_dict = {'err': '', 'file_content': '', 'encoding': '', 'file_enc': '',
'file_encoding_list': [], 'html_exists': False,
'filetype': filetype}
fsize = get_file_size(repo.store_id, repo.version, obj_id)
exceeds_limit, err_msg = file_size_exceeds_preview_limit(fsize, filetype)
if exceeds_limit:
ret_dict['err'] = err_msg
else:
"""Choose different approach when dealing with different type of file."""
if is_textual_file(file_type=filetype):
handle_textual_file(request, filetype, inner_path, ret_dict)
elif filetype == DOCUMENT:
handle_document(inner_path, obj_id, fileext, ret_dict)
elif filetype == SPREADSHEET:
handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
elif filetype == PDF:
handle_pdf(inner_path, obj_id, fileext, ret_dict)
accessible_repos = get_unencry_rw_repos_by_user(request)
save_to_link = reverse('save_private_file_share', args=[pfs.token])
office_preview_token = ret_dict.get('office_preview_token', '')
return render_to_response('shared_file_view.html', {
'repo': repo,
'obj_id': obj_id,
'path': path,
'file_name': filename,
'file_size': fsize,
'access_token': access_token,
'fileext': fileext,
'raw_path': raw_path,
'shared_by': pfs.from_user,
'err': ret_dict['err'],
'file_content': ret_dict['file_content'],
'encoding': ret_dict['encoding'],
'file_encoding_list':ret_dict['file_encoding_list'],
'html_exists': ret_dict['html_exists'],
'html_detail': ret_dict.get('html_detail', {}),
'office_preview_token': office_preview_token,
'filetype': ret_dict['filetype'],
'use_pdfjs':USE_PDFJS,
'accessible_repos': accessible_repos,
'save_to_link': save_to_link,
}, context_instance=RequestContext(request))
|
|
from __future__ import print_function
import os
import re
import shutil
from ...workspace_factory import workspace_factory
from ....utils import in_temporary_directory
from ....utils import assert_cmd_success
from ....utils import assert_cmd_failure
from ....utils import assert_files_exist
from ....utils import catkin_success
from ....utils import catkin_failure
from ....utils import redirected_stdio
from ....workspace_assertions import assert_workspace_initialized
from ....workspace_assertions import assert_no_warnings
TEST_DIR = os.path.dirname(__file__)
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
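# Base command lines used by the tests below; --no-notify and --no-status
# keep the output plain and deterministic.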
BUILD = ['build', '--no-notify', '--no-status']
CLEAN = ['clean', '--yes']
BUILD_TYPES = ['cmake', 'catkin']
def create_flat_workspace(wf, build_type, n_pkgs):
"""Create a bunch of packages with no interdependencies"""
for i in range(n_pkgs):
wf.create_package('pkg_{}'.format(i))
def create_chain_workspace(wf, build_type, n_pkgs):
"""Create a bunch of packages, each of which depends on one other in the
workspace except for the root."""
for i in range(n_pkgs):
wf.create_package(
'pkg_{}'.format(i),
depends=(['pkg_{}'.format(i - 1)] if i > 0 else []))
def create_tree_workspace(wf, build_type, n_pkg_layers, n_children=2):
"""Create a bunch of packages which form a balanced dependency tree"""
    n_pkgs = (pow(n_children, n_pkg_layers + 1) - 1) // (n_children - 1)
for i in range(n_pkgs):
wf.create_package(
'pkg_{}'.format(i),
build_type=build_type,
depends=(['pkg_{}'.format(int((i - 1) / n_children))] if i > 0 else []))
return n_pkgs
@in_temporary_directory
def test_build_no_src():
"""Calling catkin build without a source space should fail."""
assert catkin_failure(BUILD)
def test_build_auto_init_no_pkgs():
"""Test automatically initializing a workspace with no packages."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
wf.build()
assert catkin_success(BUILD)
assert_workspace_initialized('.')
assert_no_warnings(out)
def test_build_auto_init_with_pkg():
"""Test automatically initializing a workspace."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
wf.create_package('pkg_a')
wf.build()
assert catkin_success(BUILD)
assert_workspace_initialized('.')
assert_no_warnings(out)
def test_build_dry_run():
"""Test showing the build jobs without doing anything."""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_tree_workspace(wf, build_type, 3)
wf.build()
assert catkin_success(BUILD + ['--dry-run'])
assert not os.path.exists('build')
assert not os.path.exists('devel')
def test_build_all_isolate_install():
"""Test building dependent catkin packages with isolated installspace."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
n_pkgs = create_tree_workspace(wf, 'catkin', 2)
wf.create_package('pkg_dep', build_type='catkin',
build_depends=['pkg_{}'.format(n) for n in range(n_pkgs)])
wf.build()
assert catkin_success(['config', '--isolate-install', '--install'])
assert catkin_success(BUILD)
assert os.path.exists('install/pkg_dep')
assert_no_warnings(out)
def test_build_all_isolate_devel():
"""Test building dependent catkin packages with isolated develspace."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
n_pkgs = create_tree_workspace(wf, 'catkin', 2)
wf.create_package('pkg_dep', build_type='catkin',
build_depends=['pkg_{}'.format(n) for n in range(n_pkgs)])
wf.build()
assert catkin_success(['config', '--isolate-devel'])
assert catkin_success(BUILD)
assert os.path.exists('devel/pkg_dep')
assert not os.path.exists('install')
assert_no_warnings(out)
def test_build_all_merged():
"""Test building all packages in a merged workspace"""
pass # TODO: Implement test
def test_build_pkg():
"""Test building a package by name.
"""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_chain_workspace(wf, build_type, 4)
wf.build()
assert catkin_failure(BUILD + ['pkg_nil'])
assert catkin_success(BUILD + ['pkg_2'])
assert os.path.exists(os.path.join('build', 'pkg_0'))
assert os.path.exists(os.path.join('build', 'pkg_1'))
assert os.path.exists(os.path.join('build', 'pkg_2'))
assert not os.path.exists(os.path.join('build', 'pkg_3'))
def test_build_no_deps():
"""Test building a package by name without deps."""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_chain_workspace(wf, build_type, 3)
wf.build()
# --no-deps needs an argument
assert catkin_failure(BUILD + ['--no-deps'])
                # only pkg_2 should be built
assert catkin_success(BUILD + ['pkg_2', '--no-deps'])
assert os.path.exists(os.path.join('build', 'pkg_2'))
assert not os.path.exists(os.path.join('build', 'pkg_1'))
assert not os.path.exists(os.path.join('build', 'pkg_0'))
def test_build_start_with():
"""Test building all packages starting with a specific one."""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_chain_workspace(wf, build_type, 4)
wf.build()
# --start-with needs an argument
assert catkin_failure(BUILD + ['--start-with'])
# --start-with needs a valid package
assert catkin_failure(BUILD + ['--start-with', 'pkg_nil'])
# this should build all packages
assert catkin_success(BUILD + ['--start-with', 'pkg_0'])
for i in range(4):
assert os.path.exists(os.path.join('build', 'pkg_{}'.format(i)))
assert catkin_success(CLEAN)
# this should skip pkg_2's deps
assert catkin_success(BUILD + ['--start-with', 'pkg_2'])
assert not os.path.exists(os.path.join('build', 'pkg_0'))
assert not os.path.exists(os.path.join('build', 'pkg_1'))
assert os.path.exists(os.path.join('build', 'pkg_2'))
assert os.path.exists(os.path.join('build', 'pkg_3'))
assert catkin_success(CLEAN)
def test_unbuilt_linked():
"""Test building packages which have yet to be built"""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_chain_workspace(wf, build_type, 2)
wf.build()
                # only pkg_0 should be built
assert catkin_success(BUILD + ['pkg_0', '--no-deps'])
# the rest should be built, but pkg_0 shouldn't be rebuilt
assert os.path.exists(os.path.join('build', 'pkg_0'))
assert not os.path.exists(os.path.join('build', 'pkg_1'))
pkg_0_log_path = os.path.join('logs', 'pkg_0')
# build the unbuilt packages (rebuild deps)
pkg_0_log_files = os.listdir(pkg_0_log_path)
assert catkin_success(BUILD + ['--unbuilt'])
assert os.path.exists(os.path.join('build', 'pkg_0'))
assert os.path.exists(os.path.join('build', 'pkg_1'))
# make sure pkg_0 has been rebuilt
assert pkg_0_log_files != os.listdir(pkg_0_log_path)
# build the unbuilt packages (don't rebuild deps)
pkg_0_log_files = os.listdir(pkg_0_log_path)
assert catkin_success(['clean', 'pkg_1'])
assert catkin_success(BUILD + ['--unbuilt', '--no-deps'])
assert os.path.exists(os.path.join('build', 'pkg_0'))
assert os.path.exists(os.path.join('build', 'pkg_1'))
# make sure pkg_0 hasn't been rebuilt
assert pkg_0_log_files == os.listdir(pkg_0_log_path)
def test_unbuilt_isolated():
"""Test building unbuilt packages with an isolated develspace."""
pass # TODO: This should succeed, but isn't implemented for isolated develspaces
def test_unbuilt_merged():
"""Test building unbuilt packages with a merged develspace."""
    pass # TODO: This should fail, but the check hasn't been tested
def test_continue_on_failure():
"""Test behavior when some packages fail to build."""
pass # TODO: Write test
def test_preclean():
"""Test pre-cleaning packages in a workspace."""
pass # TODO: Write test
def test_force_cmake():
"""Test forcing cmake to run on packages in a workspace."""
pass # TODO: Write test
def test_install():
"""Test building and installing catkin packages without DESTDIR"""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_chain_workspace(wf, build_type, 2)
wf.build()
assert catkin_success(['config', '--install'])
assert catkin_success(BUILD)
assert os.path.exists(os.path.join('install'))
def test_install_cmake():
"""Test building and installing cmake packages without DESTDIR."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
print(os.getcwd())
wf.build()
shutil.copytree(
os.path.join(RESOURCES_DIR, 'cmake_pkgs'),
os.path.join('src/cmake_pkgs'))
assert catkin_success(['config', '--install'])
assert catkin_success(BUILD)
assert os.path.exists(os.path.join('install'))
def test_install_cmake_destdir():
"""Test building and installing cmake packages with DESTDIR."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
print(os.getcwd())
wf.build()
shutil.copytree(
os.path.join(RESOURCES_DIR, 'cmake_pkgs'),
os.path.join('src/cmake_pkgs'))
tmpinstall_path = os.path.join(os.getcwd(), 'tmpinstall')
env = {'DESTDIR': tmpinstall_path}
assert catkin_success(['config', '--install', '--install-space', '/opt/foo'], env)
assert catkin_success(BUILD, env)
assert os.path.exists(tmpinstall_path)
assert not os.path.exists(os.path.join('install'))
def test_install_catkin_destdir():
"""Test building and installing catkin packages with DESTDIR."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
print(os.getcwd())
wf.build()
shutil.copytree(
os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'products_0'),
os.path.join('src', 'products_0'))
tmpinstall_path = os.path.join(os.getcwd(), 'tmpinstall')
env = {'DESTDIR': tmpinstall_path}
install_space = os.path.abspath(os.path.join('opt', 'foo'))
assert catkin_success(['config', '--install', '--install-space', install_space], env)
assert catkin_success(BUILD, env)
assert os.path.exists(tmpinstall_path)
assert not os.path.exists(os.path.join('install'))
# check for _CATKIN_SETUP_DIR
setup_sh_path = os.path.join(tmpinstall_path, install_space.lstrip(os.sep), 'setup.sh')
print(setup_sh_path)
assert os.path.exists(setup_sh_path)
setup_dir_correct = False
with open(setup_sh_path, "r") as setup_sh:
for line in setup_sh:
if re.search('_CATKIN_SETUP_DIR:={}'.format(install_space), line):
setup_dir_correct = True
break
assert setup_dir_correct is True
def test_pkg_with_unicode_names():
"""Test building a package with unicode file names."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
print(os.getcwd())
wf.build()
shutil.copytree(
os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'products_unicode'),
os.path.join('src/products_unicode'))
assert catkin_success(['config', '--link-devel'])
assert catkin_success(BUILD)
def test_glob_pattern_build():
"""Test building multiple packages given as glob pattern"""
with redirected_stdio() as (out, err):
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
create_flat_workspace(wf, build_type, 11)
wf.build()
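                # 'pkg_1*' should match only pkg_1 and pkg_10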
assert catkin_success(BUILD + ['pkg_1*'])
assert not os.path.exists(os.path.join('build', 'pkg_0'))
assert os.path.exists(os.path.join('build', 'pkg_1'))
assert os.path.exists(os.path.join('build', 'pkg_10'))
assert not os.path.exists(os.path.join('build', 'pkg_2'))
assert not os.path.exists(os.path.join('build', 'pkg_3'))
assert not os.path.exists(os.path.join('build', 'pkg_4'))
assert not os.path.exists(os.path.join('build', 'pkg_5'))
assert not os.path.exists(os.path.join('build', 'pkg_6'))
assert not os.path.exists(os.path.join('build', 'pkg_7'))
assert not os.path.exists(os.path.join('build', 'pkg_8'))
assert not os.path.exists(os.path.join('build', 'pkg_9'))
def test_pkg_with_conditional_build_type():
"""Test building a dual catkin/ament package."""
with redirected_stdio() as (out, err):
with workspace_factory() as wf:
print(os.getcwd())
wf.build()
shutil.copytree(
os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'build_type_condition'),
os.path.join('src/build_type_condition'))
assert catkin_success(['config', '--merge-devel'])
assert catkin_success(BUILD)
# Currently the build verb skips over packages it doesn't know how to build.
# So we have to infer this skipping by checking the build directory.
msg = "Package with ROS 2 conditional build_type was skipped."
assert os.path.exists(os.path.join('build', 'build_type_condition')), msg
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
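    # Both queue objects use the same shared_name, so they refer to a single
    # underlying queue resource.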
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
iterator = (
dataset_ops.Dataset.range(105)
.map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(100):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConstantOutput(self):
iterator = (
dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, b"hello", 10), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testNestedDatasetError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
NotImplementedError, r"The Dataset.map\(\) transformation does not "
"currently support nested datasets as outputs."):
_ = dataset.map(dataset_ops.Dataset.from_tensor_slices)
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
class MapDatasetBenchmark(test.Benchmark):
def benchmarkChainOfMaps(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
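        # repeat(None) repeats indefinitely, so the timing loop below never
        # exhausts the dataset.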
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
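          # Warm up the session before timing.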
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset chain length: %d Median wall time: %f"
% (chain_length, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_chain_latency_%d" % chain_length)
def benchmarkMapFanOut(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
for fan_out in fan_outs:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None).map(lambda *xs: xs)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element[0].op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element[0].op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset fan out: %d Median wall time: %f"
% (fan_out, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_fan_out_%d" % fan_out)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.vmmigration_v1.services.vm_migration.client import VmMigrationClient
from google.cloud.vmmigration_v1.services.vm_migration.async_client import (
VmMigrationAsyncClient,
)
from google.cloud.vmmigration_v1.types.vmmigration import AddGroupMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import AddGroupMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import AppliedLicense
from google.cloud.vmmigration_v1.types.vmmigration import CancelCloneJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import CancelCloneJobResponse
from google.cloud.vmmigration_v1.types.vmmigration import CancelCutoverJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import CancelCutoverJobResponse
from google.cloud.vmmigration_v1.types.vmmigration import CloneJob
from google.cloud.vmmigration_v1.types.vmmigration import ComputeEngineTargetDefaults
from google.cloud.vmmigration_v1.types.vmmigration import ComputeEngineTargetDetails
from google.cloud.vmmigration_v1.types.vmmigration import ComputeScheduling
from google.cloud.vmmigration_v1.types.vmmigration import CreateCloneJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import CreateCutoverJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import (
CreateDatacenterConnectorRequest,
)
from google.cloud.vmmigration_v1.types.vmmigration import CreateGroupRequest
from google.cloud.vmmigration_v1.types.vmmigration import CreateMigratingVmRequest
from google.cloud.vmmigration_v1.types.vmmigration import CreateSourceRequest
from google.cloud.vmmigration_v1.types.vmmigration import CreateTargetProjectRequest
from google.cloud.vmmigration_v1.types.vmmigration import CreateUtilizationReportRequest
from google.cloud.vmmigration_v1.types.vmmigration import CutoverJob
from google.cloud.vmmigration_v1.types.vmmigration import DatacenterConnector
from google.cloud.vmmigration_v1.types.vmmigration import (
DeleteDatacenterConnectorRequest,
)
from google.cloud.vmmigration_v1.types.vmmigration import DeleteGroupRequest
from google.cloud.vmmigration_v1.types.vmmigration import DeleteMigratingVmRequest
from google.cloud.vmmigration_v1.types.vmmigration import DeleteSourceRequest
from google.cloud.vmmigration_v1.types.vmmigration import DeleteTargetProjectRequest
from google.cloud.vmmigration_v1.types.vmmigration import DeleteUtilizationReportRequest
from google.cloud.vmmigration_v1.types.vmmigration import FetchInventoryRequest
from google.cloud.vmmigration_v1.types.vmmigration import FetchInventoryResponse
from google.cloud.vmmigration_v1.types.vmmigration import FinalizeMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import FinalizeMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import GetCloneJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetCutoverJobRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetDatacenterConnectorRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetGroupRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetMigratingVmRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetSourceRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetTargetProjectRequest
from google.cloud.vmmigration_v1.types.vmmigration import GetUtilizationReportRequest
from google.cloud.vmmigration_v1.types.vmmigration import Group
from google.cloud.vmmigration_v1.types.vmmigration import ListCloneJobsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListCloneJobsResponse
from google.cloud.vmmigration_v1.types.vmmigration import ListCutoverJobsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListCutoverJobsResponse
from google.cloud.vmmigration_v1.types.vmmigration import (
ListDatacenterConnectorsRequest,
)
from google.cloud.vmmigration_v1.types.vmmigration import (
ListDatacenterConnectorsResponse,
)
from google.cloud.vmmigration_v1.types.vmmigration import ListGroupsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListGroupsResponse
from google.cloud.vmmigration_v1.types.vmmigration import ListMigratingVmsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListMigratingVmsResponse
from google.cloud.vmmigration_v1.types.vmmigration import ListSourcesRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListSourcesResponse
from google.cloud.vmmigration_v1.types.vmmigration import ListTargetProjectsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListTargetProjectsResponse
from google.cloud.vmmigration_v1.types.vmmigration import ListUtilizationReportsRequest
from google.cloud.vmmigration_v1.types.vmmigration import ListUtilizationReportsResponse
from google.cloud.vmmigration_v1.types.vmmigration import MigratingVm
from google.cloud.vmmigration_v1.types.vmmigration import MigrationError
from google.cloud.vmmigration_v1.types.vmmigration import NetworkInterface
from google.cloud.vmmigration_v1.types.vmmigration import OperationMetadata
from google.cloud.vmmigration_v1.types.vmmigration import PauseMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import PauseMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import RemoveGroupMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import RemoveGroupMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import ReplicationCycle
from google.cloud.vmmigration_v1.types.vmmigration import ReplicationSync
from google.cloud.vmmigration_v1.types.vmmigration import ResumeMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import ResumeMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import SchedulePolicy
from google.cloud.vmmigration_v1.types.vmmigration import SchedulingNodeAffinity
from google.cloud.vmmigration_v1.types.vmmigration import Source
from google.cloud.vmmigration_v1.types.vmmigration import StartMigrationRequest
from google.cloud.vmmigration_v1.types.vmmigration import StartMigrationResponse
from google.cloud.vmmigration_v1.types.vmmigration import TargetProject
from google.cloud.vmmigration_v1.types.vmmigration import UpdateGroupRequest
from google.cloud.vmmigration_v1.types.vmmigration import UpdateMigratingVmRequest
from google.cloud.vmmigration_v1.types.vmmigration import UpdateSourceRequest
from google.cloud.vmmigration_v1.types.vmmigration import UpdateTargetProjectRequest
from google.cloud.vmmigration_v1.types.vmmigration import UtilizationReport
from google.cloud.vmmigration_v1.types.vmmigration import VmUtilizationInfo
from google.cloud.vmmigration_v1.types.vmmigration import VmUtilizationMetrics
from google.cloud.vmmigration_v1.types.vmmigration import VmwareSourceDetails
from google.cloud.vmmigration_v1.types.vmmigration import VmwareVmDetails
from google.cloud.vmmigration_v1.types.vmmigration import VmwareVmsDetails
from google.cloud.vmmigration_v1.types.vmmigration import ComputeEngineBootOption
from google.cloud.vmmigration_v1.types.vmmigration import ComputeEngineDiskType
from google.cloud.vmmigration_v1.types.vmmigration import ComputeEngineLicenseType
from google.cloud.vmmigration_v1.types.vmmigration import UtilizationReportView
__all__ = (
"VmMigrationClient",
"VmMigrationAsyncClient",
"AddGroupMigrationRequest",
"AddGroupMigrationResponse",
"AppliedLicense",
"CancelCloneJobRequest",
"CancelCloneJobResponse",
"CancelCutoverJobRequest",
"CancelCutoverJobResponse",
"CloneJob",
"ComputeEngineTargetDefaults",
"ComputeEngineTargetDetails",
"ComputeScheduling",
"CreateCloneJobRequest",
"CreateCutoverJobRequest",
"CreateDatacenterConnectorRequest",
"CreateGroupRequest",
"CreateMigratingVmRequest",
"CreateSourceRequest",
"CreateTargetProjectRequest",
"CreateUtilizationReportRequest",
"CutoverJob",
"DatacenterConnector",
"DeleteDatacenterConnectorRequest",
"DeleteGroupRequest",
"DeleteMigratingVmRequest",
"DeleteSourceRequest",
"DeleteTargetProjectRequest",
"DeleteUtilizationReportRequest",
"FetchInventoryRequest",
"FetchInventoryResponse",
"FinalizeMigrationRequest",
"FinalizeMigrationResponse",
"GetCloneJobRequest",
"GetCutoverJobRequest",
"GetDatacenterConnectorRequest",
"GetGroupRequest",
"GetMigratingVmRequest",
"GetSourceRequest",
"GetTargetProjectRequest",
"GetUtilizationReportRequest",
"Group",
"ListCloneJobsRequest",
"ListCloneJobsResponse",
"ListCutoverJobsRequest",
"ListCutoverJobsResponse",
"ListDatacenterConnectorsRequest",
"ListDatacenterConnectorsResponse",
"ListGroupsRequest",
"ListGroupsResponse",
"ListMigratingVmsRequest",
"ListMigratingVmsResponse",
"ListSourcesRequest",
"ListSourcesResponse",
"ListTargetProjectsRequest",
"ListTargetProjectsResponse",
"ListUtilizationReportsRequest",
"ListUtilizationReportsResponse",
"MigratingVm",
"MigrationError",
"NetworkInterface",
"OperationMetadata",
"PauseMigrationRequest",
"PauseMigrationResponse",
"RemoveGroupMigrationRequest",
"RemoveGroupMigrationResponse",
"ReplicationCycle",
"ReplicationSync",
"ResumeMigrationRequest",
"ResumeMigrationResponse",
"SchedulePolicy",
"SchedulingNodeAffinity",
"Source",
"StartMigrationRequest",
"StartMigrationResponse",
"TargetProject",
"UpdateGroupRequest",
"UpdateMigratingVmRequest",
"UpdateSourceRequest",
"UpdateTargetProjectRequest",
"UtilizationReport",
"VmUtilizationInfo",
"VmUtilizationMetrics",
"VmwareSourceDetails",
"VmwareVmDetails",
"VmwareVmsDetails",
"ComputeEngineBootOption",
"ComputeEngineDiskType",
"ComputeEngineLicenseType",
"UtilizationReportView",
)
|
|
import re
import datetime
from collections import defaultdict
import dateutil.parser
import pytz
from django import forms
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.utils.timezone import utc
from django.utils.safestring import mark_safe
from funfactory.urlresolvers import reverse
from slugify import slugify
from airmozilla.base.forms import BaseModelForm, BaseForm
from airmozilla.manage import url_transformer
from airmozilla.main.models import (
Approval,
Event,
EventTweet,
Location,
Region,
Tag,
Template,
Channel,
SuggestedEvent,
SuggestedEventComment,
URLMatch,
EventAssignment,
LocationDefaultEnvironment,
RecruitmentMessage,
Picture,
Topic,
Chapter,
)
from airmozilla.comments.models import Discussion, Comment
from airmozilla.surveys.models import Question, Survey
from airmozilla.staticpages.models import StaticPage
from airmozilla.base.helpers import show_duration_compact
from .widgets import PictureWidget
TIMEZONE_CHOICES = [(tz, tz.replace('_', ' ')) for tz in pytz.common_timezones]
ONE_HOUR = 60 * 60
class UserEditForm(BaseModelForm):
class Meta:
model = User
fields = ('is_active', 'is_staff', 'is_superuser', 'groups')
def clean(self):
cleaned_data = super(UserEditForm, self).clean()
is_active = cleaned_data.get('is_active')
is_staff = cleaned_data.get('is_staff')
is_superuser = cleaned_data.get('is_superuser')
groups = cleaned_data.get('groups')
if is_superuser and not is_staff:
raise forms.ValidationError('Superusers must be staff.')
if is_staff and not is_active:
raise forms.ValidationError('Staff must be active.')
if is_staff and not is_superuser and not groups:
raise forms.ValidationError(
'Non-superuser staff must belong to a group.'
)
return cleaned_data
class GroupEditForm(BaseModelForm):
def __init__(self, *args, **kwargs):
super(GroupEditForm, self).__init__(*args, **kwargs)
self.fields['name'].required = True
choices = self.fields['permissions'].choices
self.fields['permissions'] = forms.MultipleChoiceField(
choices=choices,
widget=forms.CheckboxSelectMultiple,
required=False
)
class Meta:
model = Group
class EventRequestForm(BaseModelForm):
tags = forms.CharField(required=False)
class Meta:
model = Event
widgets = {
'description': forms.Textarea(attrs={'rows': 4}),
'short_description': forms.Textarea(attrs={'rows': 2}),
'call_info': forms.Textarea(attrs={'rows': 3}),
'additional_links': forms.Textarea(attrs={'rows': 3}),
'template_environment': forms.Textarea(attrs={'rows': 3}),
'remote_presenters': forms.Textarea(attrs={'rows': 3}),
'start_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
'estimated_duration': forms.widgets.Select(
choices=Event.ESTIMATED_DURATION_CHOICES
),
}
exclude = ('featured', 'status', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'remote_presenters',
'additional_links', 'privacy', 'popcorn_url'
)
def __init__(self, *args, **kwargs):
super(EventRequestForm, self).__init__(*args, **kwargs)
self.fields['channels'].help_text = (
'<a href="%s" class="btn btn-default" target="_blank">'
'<i class="glyphicon glyphicon-plus-sign"></i>'
'New channel'
'</a>' % reverse('manage:channel_new'))
self.fields['placeholder_img'].label = 'Placeholder image'
if 'instance' in kwargs:
event = kwargs['instance']
approvals = event.approval_set.all()
self.initial['approvals'] = [app.group for app in approvals]
if event.location:
self.fields['start_time'].help_text = (
'Time zone of this date is that of {0}.'.format(
event.location.timezone
)
)
# when the django forms present the start_time form field,
# it's going to first change it to UTC, then strftime it
self.initial['start_time'] = (
event.location_time.replace(tzinfo=utc)
)
else:
self.fields['start_time'].help_text = (
                    'Since there is no location, the time zone of this '
                    'date is UTC.'
)
if event.pk:
tags_formatted = ','.join(x.name for x in event.tags.all())
self.initial['tags'] = tags_formatted
def clean_tags(self):
tags = self.cleaned_data['tags']
split_tags = [t.strip() for t in tags.split(',') if t.strip()]
final_tags = []
for tag_name in split_tags:
try:
t = Tag.objects.get(name=tag_name)
except Tag.DoesNotExist:
try:
t = Tag.objects.get(name__iexact=tag_name)
except Tag.DoesNotExist:
t = Tag.objects.create(name=tag_name)
final_tags.append(t)
return final_tags
def clean_slug(self):
"""Enforce unique slug across current slugs and old slugs."""
slug = self.cleaned_data['slug']
if Event.objects.filter(slug=slug).exclude(pk=self.instance.id):
raise forms.ValidationError('This slug is already in use.')
return slug
@staticmethod
def _check_staticpage_slug(slug):
if StaticPage.objects.filter(url__startswith='/%s' % slug).count():
raise forms.ValidationError(
"The default slug for event would clash with an existing "
"static page with the same URL. It might destroy existing "
"URLs that people depend on."
)
def clean(self):
data = super(EventRequestForm, self).clean()
if data.get('title') and not data.get('slug'):
# this means you have submitted a form without being explicit
# about what the slug will be
self._check_staticpage_slug(slugify(data.get('title')).lower())
elif data.get('slug'):
# are you trying to change it?
if self.instance.slug != data['slug']:
# apparently, you want to change to a new slug
self._check_staticpage_slug(data['slug'])
return data
class EventEditForm(EventRequestForm):
approvals = forms.ModelMultipleChoiceField(
queryset=Group.objects.filter(permissions__codename='change_approval'),
required=False,
widget=forms.CheckboxSelectMultiple()
)
curated_groups = forms.CharField(
required=False,
help_text='Curated groups only matter if the event is open to'
' "%s".' % [x[1] for x in Event.PRIVACY_CHOICES
if x[0] == Event.PRIVACY_CONTRIBUTORS][0]
)
class Meta(EventRequestForm.Meta):
exclude = ('archive_time',)
# Fields specified to enforce order
fields = (
'title', 'slug', 'status', 'privacy', 'featured', 'template',
'template_environment', 'placeholder_img', 'picture',
'location',
'description', 'short_description', 'start_time',
'estimated_duration',
'archive_time',
'channels', 'tags',
'call_info', 'additional_links', 'remote_presenters',
'approvals',
'popcorn_url',
'pin',
'recruitmentmessage',
)
def __init__(self, *args, **kwargs):
super(EventEditForm, self).__init__(*args, **kwargs)
if 'pin' in self.fields:
self.fields['pin'].help_text = (
"Use of pins is deprecated. Use Curated groups instead."
)
self.fields['popcorn_url'].label = 'Popcorn URL'
if 'recruitmentmessage' in self.fields:
self.fields['recruitmentmessage'].required = False
self.fields['recruitmentmessage'].label = 'Recruitment message'
self.fields.keyOrder.pop(
self.fields.keyOrder.index('curated_groups')
)
self.fields.keyOrder.insert(
self.fields.keyOrder.index('privacy') + 1,
'curated_groups'
)
self.fields['location'].queryset = (
Location.objects.filter(is_active=True).order_by('name')
)
if self.instance and self.instance.id:
# Checking for id because it might be an instance but never
# been saved before.
self.fields['picture'].widget = PictureWidget(self.instance)
# make the list of approval objects depend on requested approvals
group_ids = [
x[0] for x in
Approval.objects
.filter(event=self.instance).values_list('group')
]
self.fields['approvals'].queryset = Group.objects.filter(
id__in=group_ids
)
# If the event has a duration, it doesn't make sense to
# show the estimated_duration widget.
if self.instance.duration:
del self.fields['estimated_duration']
elif self.initial.get('picture'):
self.fields['picture'].widget = PictureWidget(
Picture.objects.get(id=self.initial['picture']),
editable=False
)
else:
# too early to associate with a picture
del self.fields['picture']
def clean_pin(self):
value = self.cleaned_data['pin']
if value and len(value) < 4:
raise forms.ValidationError("Pin too short to be safe")
return value
def clean(self):
cleaned_data = super(EventEditForm, self).clean()
if not (
cleaned_data.get('placeholder_img') or cleaned_data.get('picture')
):
raise forms.ValidationError("Must have a placeholder or a Picture")
return cleaned_data
class EventExperiencedRequestForm(EventEditForm):
class Meta(EventEditForm.Meta):
exclude = ('featured', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'status', 'privacy', 'template',
'template_environment', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'additional_links', 'remote_presenters',
'approvals', 'pin', 'popcorn_url', 'recruitmentmessage'
)
class EventArchiveForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('template', 'template_environment')
class EventArchiveTimeForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('archive_time',)
def __init__(self, *args, **kwargs):
super(EventArchiveTimeForm, self).__init__(*args, **kwargs)
self.fields['archive_time'].help_text = (
"Input timezone is <b>UTC</b>"
)
if self.initial['archive_time']:
# Force it to a UTC string so Django doesn't convert it
# to a timezone-less string in the settings.TIME_ZONE timezone.
self.initial['archive_time'] = (
self.initial['archive_time'].strftime('%Y-%m-%d %H:%M:%S')
)
def clean_archive_time(self):
value = self.cleaned_data['archive_time']
# force it back to UTC
if value:
value = value.replace(tzinfo=utc)
return value
class EventTweetForm(BaseModelForm):
class Meta:
model = EventTweet
fields = (
'text',
'include_placeholder',
'send_date',
)
widgets = {
'text': forms.Textarea(attrs={
'autocomplete': 'off',
'data-maxlength': 140,
'rows': 2,
})
}
def __init__(self, event, *args, **kwargs):
super(EventTweetForm, self).__init__(*args, **kwargs)
self.fields['text'].help_text = (
'<b class="char-counter">140</b> characters left. '
'<span class="char-counter-warning"><b>Note!</b> Sometimes '
'Twitter can count it as longer than it appears if you '
'include a URL. '
'It\'s usually best to leave a little room.</span>'
)
# it's a NOT NULL field but it defaults to NOW()
# in the views code
self.fields['send_date'].required = False
if event.tags.all():
def pack_tags(tags):
return '[%s]' % (','.join('"%s"' % x for x in tags))
self.fields['text'].help_text += (
'<br><a href="#" class="include-event-tags" '
'data-tags=\'%s\'>include all event tags</a>'
% pack_tags([x.name for x in event.tags.all()])
)
if event.placeholder_img or event.picture:
from airmozilla.main.helpers import thumbnail
if event.picture:
pic = event.picture.file
else:
pic = event.placeholder_img
thumb = thumbnail(pic, '160x90', crop='center')
self.fields['include_placeholder'].help_text = (
'<img src="%(url)s" alt="placeholder" class="thumbnail" '
'width="%(width)s" width="%(height)s">' %
{
'url': thumb.url,
'width': thumb.width,
'height': thumb.height
}
)
else:
del self.fields['include_placeholder']
if event.location:
self.fields['send_date'].help_text = (
'Timezone is %s' % event.location.timezone
)
class ChannelForm(BaseModelForm):
class Meta:
model = Channel
exclude = ('created',)
def __init__(self, *args, **kwargs):
super(ChannelForm, self).__init__(*args, **kwargs)
self.fields['parent'].required = False
if kwargs.get('instance'):
self.fields['parent'].choices = [
(x, y) for (x, y)
in self.fields['parent'].choices
if x != kwargs['instance'].pk
]
def clean(self):
cleaned_data = super(ChannelForm, self).clean()
if 'always_show' in cleaned_data and 'never_show' in cleaned_data:
# if one is true, the other one can't be
if cleaned_data['always_show'] and cleaned_data['never_show']:
raise forms.ValidationError(
"Can't both be on always and never shown"
)
return cleaned_data
class TemplateEditForm(BaseModelForm):
class Meta:
model = Template
widgets = {
'content': forms.Textarea(attrs={'rows': 20})
}
class TemplateMigrateForm(BaseForm):
template = forms.ModelChoiceField(
widget=forms.widgets.RadioSelect(),
queryset=Template.objects.all()
)
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance')
super(TemplateMigrateForm, self).__init__(*args, **kwargs)
scheduled = defaultdict(int)
removed = defaultdict(int)
events = Event.objects.all()
for each in events.values('template').annotate(Count('template')):
scheduled[each['template']] = each['template__count']
events = events.filter(status=Event.STATUS_REMOVED)
for each in events.values('template').annotate(Count('template')):
removed[each['template']] = each['template__count']
choices = [('', '---------')]
other_templates = Template.objects.exclude(id=self.instance.id)
for template in other_templates.order_by('name'):
choices.append((
template.id,
'{0} ({1} events, {2} removed)'.format(
template.name,
scheduled[template.id],
removed[template.id],
)
))
self.fields['template'].choices = choices
class RecruitmentMessageEditForm(BaseModelForm):
class Meta:
model = RecruitmentMessage
widgets = {
'notes': forms.Textarea(attrs={'rows': 3})
}
exclude = ('modified_user', 'created')
class EventChapterEditForm(BaseModelForm):
timestamp = forms.CharField(widget=forms.widgets.TextInput(
attrs={
'placeholder': 'For example: 22m0s'
}
))
class Meta:
model = Chapter
widgets = {
'text': forms.widgets.TextInput()
}
exclude = ('user', 'created', 'event')
def __init__(self, *args, **kwargs):
self.max_timestamp = None
if kwargs.get('instance'):
self.max_timestamp = kwargs['instance'].event.duration
if kwargs['instance'].timestamp:
kwargs['instance'].timestamp = show_duration_compact(
kwargs['instance'].timestamp
)
super(EventChapterEditForm, self).__init__(*args, **kwargs)
def clean_timestamp(self):
value = self.cleaned_data['timestamp'].strip().replace(' ', '')
hours = re.findall('(\d{1,2})h', value)
minutes = re.findall('(\d{1,2})m', value)
seconds = re.findall('(\d{1,2})s', value)
if seconds:
seconds = int(seconds[0])
else:
seconds = 0
if minutes:
minutes = int(minutes[0])
else:
minutes = 0
if hours:
hours = int(hours[0])
else:
hours = 0
total = seconds + minutes * 60 + hours * 60 * 60
if not total:
raise forms.ValidationError('Must be greater than zero')
if self.max_timestamp:
if total >= self.max_timestamp:
raise forms.ValidationError('Longer than video duration')
return total
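# A worked example of the timestamp parsing above: a submitted value like
# '1h22m30s' cleans to 1 * 3600 + 22 * 60 + 30 = 4950 seconds, while the
# placeholder value '22m0s' cleans to 22 * 60 = 1320. Any component may
# be omitted.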
class SurveyEditForm(BaseModelForm):
class Meta:
model = Survey
exclude = ('created', 'modified')
def __init__(self, *args, **kwargs):
super(SurveyEditForm, self).__init__(*args, **kwargs)
self.fields['active'].validators.append(self.validate_active)
self.fields['events'].required = False
self.fields['events'].queryset = (
self.fields['events'].queryset.order_by('title')
)
def validate_active(self, value):
if value and not self.instance.question_set.count():
raise forms.ValidationError(
"Survey must have at least one question in order to be active"
)
class SurveyNewForm(BaseModelForm):
class Meta:
model = Survey
fields = ('name', )
class LocationEditForm(BaseModelForm):
timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)
def __init__(self, *args, **kwargs):
super(LocationEditForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
initial = kwargs['instance'].timezone
else:
initial = settings.TIME_ZONE
self.initial['timezone'] = initial
class Meta:
model = Location
class LocationDefaultEnvironmentForm(BaseModelForm):
class Meta:
model = LocationDefaultEnvironment
fields = ('privacy', 'template', 'template_environment')
class RegionEditForm(BaseModelForm):
class Meta:
model = Region
class TopicEditForm(BaseModelForm):
class Meta:
model = Topic
def __init__(self, *args, **kwargs):
super(TopicEditForm, self).__init__(*args, **kwargs)
self.fields['topic'].widget = forms.widgets.TextInput(attrs={
'placeholder': 'for example Partners for Firefox OS'
})
class ApprovalForm(BaseModelForm):
class Meta:
model = Approval
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class HeadersField(forms.CharField):
widget = forms.widgets.Textarea
def __init__(self, *args, **kwargs):
super(HeadersField, self).__init__(*args, **kwargs)
self.help_text = self.help_text or mark_safe(
"For example <code>Content-Type: text/xml</code>"
)
def to_python(self, value):
if not value:
return {}
headers = {}
for line in [x.strip() for x in value.splitlines() if x.strip()]:
try:
key, value = line.split(':', 1)
except ValueError:
raise forms.ValidationError(line)
headers[key.strip()] = value.strip()
return headers
def prepare_value(self, value):
if isinstance(value, basestring):
# already prepared
return value
elif value is None:
return ''
out = []
for key in sorted(value):
out.append('%s: %s' % (key, value[key]))
return '\n'.join(out)
def widget_attrs(self, widget):
attrs = super(HeadersField, self).widget_attrs(widget)
if 'rows' not in attrs:
attrs['rows'] = 3
return attrs
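# A small round-trip sketch of HeadersField (header values chosen purely
# for illustration):
#
#     field = HeadersField(required=False)
#     field.to_python('Content-Type: text/xml\nX-Frame-Options: DENY')
#     # -> {'Content-Type': 'text/xml', 'X-Frame-Options': 'DENY'}
#     field.prepare_value({'Content-Type': 'text/xml'})
#     # -> 'Content-Type: text/xml'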
class StaticPageEditForm(BaseModelForm):
headers = HeadersField(required=False)
class Meta:
model = StaticPage
fields = (
'url',
'title',
'content',
'privacy',
'template_name',
'allow_querystring_variables',
'headers',
)
def __init__(self, *args, **kwargs):
super(StaticPageEditForm, self).__init__(*args, **kwargs)
self.fields['url'].label = 'URL'
self.fields['template_name'].label = 'Template'
choices = (
('', 'Default'),
('staticpages/nosidebar.html', 'Default (but no sidebar)'),
('staticpages/blank.html', 'Blank (no template wrapping)'),
)
self.fields['template_name'].widget = forms.widgets.Select(
choices=choices
)
def clean_url(self):
value = self.cleaned_data['url']
if value.startswith('sidebar'):
# expect it to be something like
# 'sidebar_bottom_how-tos'
try:
__, __, channel_slug = value.split('_', 2)
except ValueError:
raise forms.ValidationError(
"Must be format like `sidebar_bottom_channel-slug`"
)
try:
Channel.objects.get(slug=channel_slug)
except Channel.DoesNotExist:
raise forms.ValidationError(
"No channel slug found called `%s`" % channel_slug
)
return value
def clean(self):
cleaned_data = super(StaticPageEditForm, self).clean()
if 'url' in cleaned_data and 'privacy' in cleaned_data:
if cleaned_data['url'].startswith('sidebar_'):
if cleaned_data['privacy'] != Event.PRIVACY_PUBLIC:
raise forms.ValidationError(
"If a sidebar the privacy must be public"
)
return cleaned_data
class VidlyURLForm(forms.Form):
url = forms.CharField(
required=True,
label='URL',
widget=forms.widgets.TextInput(attrs={
'placeholder': 'E.g. http://videos.mozilla.org/.../file.flv',
'class': 'input-xxlarge',
})
)
token_protection = forms.BooleanField(required=False)
hd = forms.BooleanField(required=False, label='HD')
def __init__(self, *args, **kwargs):
disable_token_protection = kwargs.pop(
'disable_token_protection',
False
)
super(VidlyURLForm, self).__init__(*args, **kwargs)
if disable_token_protection:
self.fields['token_protection'].widget.attrs['disabled'] = (
'disabled'
)
self.fields['token_protection'].required = True
self.fields['token_protection'].help_text = (
'Required for non-public events'
)
def clean_url(self):
# annoyingly, we can't use forms.URLField since it barfs on
# Basic Auth urls. Instead, let's just make some basic validation
# here
value = self.cleaned_data['url']
if ' ' in value or '://' not in value:
raise forms.ValidationError('Not a valid URL')
value, error = url_transformer.run(value)
if error:
raise forms.ValidationError(error)
return value
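# For instance, a Basic Auth URL such as
# 'https://user:pass@example.com/videos/file.flv' (hypothetical value)
# passes the lightweight check above but would trip forms.URLField's
# validator.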
class EventsAutocompleteForm(BaseForm):
q = forms.CharField(required=True, max_length=200)
max = forms.IntegerField(required=False, min_value=1, max_value=20)
class AcceptSuggestedEventForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('review_comments',)
widgets = {
'review_comments': forms.Textarea(attrs={'rows': 3})
}
class TagEditForm(BaseModelForm):
class Meta:
model = Tag
class TagMergeForm(BaseForm):
keep = forms.ChoiceField(
label='Name to keep',
widget=forms.widgets.RadioSelect()
)
def __init__(self, this_tag, *args, **kwargs):
super(TagMergeForm, self).__init__(*args, **kwargs)
def describe_tag(tag):
count = Event.objects.filter(tags=tag).count()
if count == 1:
tmpl = '%s (%d time)'
else:
tmpl = '%s (%d times)'
return tmpl % (tag.name, count)
self.fields['keep'].choices = [
(x.id, describe_tag(x))
for x in Tag.objects.filter(name__iexact=this_tag.name)
]
class VidlyResubmitForm(VidlyURLForm):
id = forms.IntegerField(widget=forms.widgets.HiddenInput())
class URLMatchForm(BaseModelForm):
class Meta:
model = URLMatch
exclude = ('use_count',)
def clean_name(self):
name = self.cleaned_data['name'].strip()
if URLMatch.objects.filter(name__iexact=name):
raise forms.ValidationError("URL matcher name already in use")
return name
def clean_string(self):
string = self.cleaned_data['string']
try:
re.compile(string)
except Exception as e:
raise forms.ValidationError(e)
return string
class SuggestedEventCommentForm(BaseModelForm):
class Meta:
model = SuggestedEventComment
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class DiscussionForm(BaseModelForm):
class Meta:
model = Discussion
fields = ('enabled', 'closed', 'moderate_all', 'notify_all',
'moderators')
class CommentEditForm(BaseModelForm):
class Meta:
model = Comment
fields = ('status', 'comment', 'flagged')
class CommentsFilterForm(BaseForm):
user = forms.CharField(required=False)
comment = forms.CharField(required=False)
status = forms.ChoiceField(
required=False,
choices=(
(('', 'ALL'),) + Comment.STATUS_CHOICES + (('flagged', 'Flagged'),)
)
)
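# Note: the redefinition below shadows CommentsFilterForm with a subclass
# of itself; the net result is a single form that also carries an
# `event` field.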
class CommentsFilterForm(CommentsFilterForm):
event = forms.CharField(required=False)
class EventAssignmentForm(BaseModelForm):
class Meta:
model = EventAssignment
fields = ('locations', 'users')
def __init__(self, *args, **kwargs):
super(EventAssignmentForm, self).__init__(*args, **kwargs)
users = (
User.objects
.extra(select={
'email_lower': 'LOWER(email)'
})
.filter(is_active=True, is_staff=True)
.order_by('email_lower')
)
def describe_user(user):
ret = user.email
if user.first_name or user.last_name:
name = (user.first_name + ' ' + user.last_name).strip()
ret += ' (%s)' % name
return ret
self.fields['users'].choices = [
(x.pk, describe_user(x)) for x in users
]
self.fields['users'].required = False
self.fields['users'].help_text = 'Start typing to find users.'
locations = (
Location.objects.filter(is_active=True)
.order_by('name')
)
if self.instance.event.location:
locations = locations.exclude(pk=self.instance.event.location.pk)
self.fields['locations'].choices = [
(x.pk, x.name) for x in locations
]
self.fields['locations'].required = False
self.fields['locations'].help_text = 'Start typing to find locations.'
class EventTranscriptForm(BaseModelForm):
class Meta:
model = Event
fields = ('transcript', )
class QuestionForm(BaseModelForm):
class Meta:
model = Question
fields = ('question',)
class EventSurveyForm(BaseForm):
survey = forms.ChoiceField(
widget=forms.widgets.RadioSelect()
)
def __init__(self, *args, **kwargs):
super(EventSurveyForm, self).__init__(*args, **kwargs)
def describe_survey(survey):
output = survey.name
if not survey.active:
output += ' (not active)'
count_questions = Question.objects.filter(survey=survey).count()
if count_questions == 1:
output += ' (1 question)'
else:
output += ' (%d questions)' % count_questions
return output
self.fields['survey'].choices = [
('0', 'none')
] + [
(x.id, describe_survey(x)) for x in Survey.objects.all()
]
class PictureForm(BaseModelForm):
class Meta:
model = Picture
fields = ('file', 'notes', 'default_placeholder', 'is_active')
help_texts = {
'is_active': (
"Only active pictures is a choice when users pick picture."
),
}
class AutocompeterUpdateForm(BaseForm):
verbose = forms.BooleanField(required=False)
max_ = forms.IntegerField(required=False)
all = forms.BooleanField(required=False)
flush_first = forms.BooleanField(required=False)
since = forms.IntegerField(
required=False,
help_text="Minutes since last modified"
)
def clean_since(self):
value = self.cleaned_data['since']
if value:
print "Minutes", int(value)
value = datetime.timedelta(minutes=int(value))
return value
class ISODateTimeField(forms.DateTimeField):
def strptime(self, value, __):
return dateutil.parser.parse(value)
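# Unlike the stock DateTimeField input formats, this parses full ISO 8601
# strings via dateutil, e.g. a value such as '2015-06-01T12:30:00+00:00'
# (example value, for illustration only).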
class EventsDataForm(BaseForm):
since = ISODateTimeField(required=False)
|
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import numpy as np
import tensorflow as tf
from ludwig.constants import *
from ludwig.encoders.text_encoders import ENCODER_REGISTRY
from ludwig.features.sequence_feature import SequenceInputFeature
from ludwig.features.sequence_feature import SequenceOutputFeature
from ludwig.utils.math_utils import softmax
from ludwig.utils.metrics_utils import ConfusionMatrix
from ludwig.utils.misc_utils import get_from_registry
from ludwig.utils.misc_utils import set_default_value
from ludwig.utils.misc_utils import set_default_values
from ludwig.utils.strings_utils import PADDING_SYMBOL
from ludwig.utils.strings_utils import UNKNOWN_SYMBOL
from ludwig.utils.strings_utils import build_sequence_matrix
from ludwig.utils.strings_utils import create_vocabulary
logger = logging.getLogger(__name__)
class TextFeatureMixin(object):
type = TEXT
preprocessing_defaults = {
'char_tokenizer': 'characters',
'char_vocab_file': None,
'char_sequence_length_limit': 1024,
'char_most_common': 70,
'word_tokenizer': 'space_punct',
'pretrained_model_name_or_path': None,
'word_vocab_file': None,
'word_sequence_length_limit': 256,
'word_most_common': 20000,
'padding_symbol': PADDING_SYMBOL,
'unknown_symbol': UNKNOWN_SYMBOL,
'padding': 'right',
'lowercase': True,
'missing_value_strategy': FILL_WITH_CONST,
'fill_value': UNKNOWN_SYMBOL
}
@staticmethod
def cast_column(feature, dataset_df, backend):
return dataset_df
@staticmethod
def feature_meta(column, preprocessing_parameters, backend):
(
char_idx2str,
char_str2idx,
char_str2freq,
char_max_len,
char_pad_idx,
char_pad_symbol,
char_unk_symbol,
) = create_vocabulary(
column,
tokenizer_type='characters',
num_most_frequent=preprocessing_parameters['char_most_common'],
lowercase=preprocessing_parameters['lowercase'],
unknown_symbol=preprocessing_parameters['unknown_symbol'],
padding_symbol=preprocessing_parameters['padding_symbol'],
pretrained_model_name_or_path=preprocessing_parameters[
'pretrained_model_name_or_path'],
processor=backend.df_engine
)
(
word_idx2str,
word_str2idx,
word_str2freq,
word_max_len,
word_pad_idx,
word_pad_symbol,
word_unk_symbol,
) = create_vocabulary(
column,
tokenizer_type=preprocessing_parameters['word_tokenizer'],
num_most_frequent=preprocessing_parameters['word_most_common'],
lowercase=preprocessing_parameters['lowercase'],
vocab_file=preprocessing_parameters['word_vocab_file'],
unknown_symbol=preprocessing_parameters['unknown_symbol'],
padding_symbol=preprocessing_parameters['padding_symbol'],
pretrained_model_name_or_path=preprocessing_parameters[
'pretrained_model_name_or_path'],
processor=backend.df_engine
)
return (
char_idx2str,
char_str2idx,
char_str2freq,
char_max_len,
char_pad_idx,
char_pad_symbol,
char_unk_symbol,
word_idx2str,
word_str2idx,
word_str2freq,
word_max_len,
word_pad_idx,
word_pad_symbol,
word_unk_symbol,
)
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
tf_meta = TextFeatureMixin.feature_meta(
column, preprocessing_parameters, backend
)
(
char_idx2str,
char_str2idx,
char_str2freq,
char_max_len,
char_pad_idx,
char_pad_symbol,
char_unk_symbol,
word_idx2str,
word_str2idx,
word_str2freq,
word_max_len,
word_pad_idx,
word_pad_symbol,
word_unk_symbol,
) = tf_meta
char_max_len = min(
preprocessing_parameters['char_sequence_length_limit'],
char_max_len
)
word_max_len = min(
preprocessing_parameters['word_sequence_length_limit'],
word_max_len
)
return {
'char_idx2str': char_idx2str,
'char_str2idx': char_str2idx,
'char_str2freq': char_str2freq,
'char_vocab_size': len(char_idx2str),
'char_max_sequence_length': char_max_len,
'char_pad_idx': char_pad_idx,
'char_pad_symbol': char_pad_symbol,
'char_unk_symbol': char_unk_symbol,
'word_idx2str': word_idx2str,
'word_str2idx': word_str2idx,
'word_str2freq': word_str2freq,
'word_vocab_size': len(word_idx2str),
'word_max_sequence_length': word_max_len,
'word_pad_idx': word_pad_idx,
'word_pad_symbol': word_pad_symbol,
'word_unk_symbol': word_unk_symbol,
}
@staticmethod
def feature_data(column, metadata, preprocessing_parameters, backend):
char_data = build_sequence_matrix(
sequences=column,
inverse_vocabulary=metadata['char_str2idx'],
tokenizer_type=preprocessing_parameters['char_tokenizer'],
length_limit=metadata['char_max_sequence_length'],
padding_symbol=metadata['char_pad_symbol'],
padding=preprocessing_parameters['padding'],
unknown_symbol=metadata['char_unk_symbol'],
lowercase=preprocessing_parameters['lowercase'],
tokenizer_vocab_file=preprocessing_parameters[
'char_vocab_file'
],
pretrained_model_name_or_path=preprocessing_parameters[
'pretrained_model_name_or_path'
],
processor=backend.df_engine
)
word_data = build_sequence_matrix(
sequences=column,
inverse_vocabulary=metadata['word_str2idx'],
tokenizer_type=preprocessing_parameters['word_tokenizer'],
length_limit=metadata['word_max_sequence_length'],
padding_symbol=metadata['word_pad_symbol'],
padding=preprocessing_parameters['padding'],
unknown_symbol=metadata['word_unk_symbol'],
lowercase=preprocessing_parameters['lowercase'],
tokenizer_vocab_file=preprocessing_parameters[
'word_vocab_file'
],
pretrained_model_name_or_path=preprocessing_parameters[
'pretrained_model_name_or_path'
],
processor=backend.df_engine
)
return char_data, word_data
@staticmethod
def add_feature_data(
feature,
input_df,
proc_df,
metadata,
preprocessing_parameters,
backend
):
chars_data, words_data = TextFeatureMixin.feature_data(
input_df[feature[COLUMN]].astype(str),
metadata[feature[NAME]],
preprocessing_parameters,
backend
)
proc_df['{}_char'.format(feature[PROC_COLUMN])] = chars_data
proc_df['{}_word'.format(feature[PROC_COLUMN])] = words_data
return proc_df
class TextInputFeature(TextFeatureMixin, SequenceInputFeature):
encoder = 'parallel_cnn'
max_sequence_length = None
level = 'word'
def __init__(self, feature, encoder_obj=None):
super().__init__(feature, encoder_obj=encoder_obj)
if 'pad_idx' in feature.keys():
self.pad_idx = feature['pad_idx']
else:
self.pad_idx = None
def call(self, inputs, training=None, mask=None):
assert isinstance(inputs, tf.Tensor)
assert inputs.dtype == tf.int8 or inputs.dtype == tf.int16 or \
inputs.dtype == tf.int32 or inputs.dtype == tf.int64
assert len(inputs.shape) == 2
inputs_exp = tf.cast(inputs, dtype=tf.int32)
        if self.pad_idx is not None:
            inputs_mask = tf.not_equal(inputs, self.pad_idx)
            lengths = tf.reduce_sum(tf.cast(inputs_mask, dtype=tf.int32),
                                    axis=1)
        else:
            # With no pad index there is no mask, so every position
            # counts toward the sequence length.
            inputs_mask = None
            lengths = tf.fill([tf.shape(inputs)[0]], tf.shape(inputs)[1])
encoder_output = self.encoder_obj(
inputs_exp, training=training, mask=inputs_mask
)
encoder_output[LENGTHS] = lengths
return encoder_output
@classmethod
def get_input_dtype(cls):
return tf.int32
def get_input_shape(self):
return None,
@staticmethod
def update_config_with_metadata(
input_feature,
feature_metadata,
*args,
**kwargs
):
input_feature['vocab'] = (
feature_metadata[input_feature['level'] + '_idx2str']
)
input_feature['max_sequence_length'] = (
feature_metadata[input_feature['level'] + '_max_sequence_length']
)
input_feature['pad_idx'] = (
feature_metadata[input_feature['level'] + '_pad_idx']
)
input_feature['num_tokens'] = (
len(feature_metadata[input_feature['level'] + '_idx2str'])
)
@staticmethod
def populate_defaults(input_feature):
set_default_values(
input_feature,
{
TIED: None,
'encoder': 'parallel_cnn',
'level': 'word'
}
)
encoder_class = get_from_registry(
input_feature['encoder'],
TextInputFeature.encoder_registry
)
if hasattr(encoder_class, 'default_params'):
set_default_values(
input_feature,
encoder_class.default_params
)
encoder_registry = ENCODER_REGISTRY
class TextOutputFeature(TextFeatureMixin, SequenceOutputFeature):
loss = {TYPE: SOFTMAX_CROSS_ENTROPY}
metric_functions = {LOSS: None, TOKEN_ACCURACY: None, LAST_ACCURACY: None,
PERPLEXITY: None, EDIT_DISTANCE: None}
default_validation_metric = LOSS
max_sequence_length = 0
num_classes = 0
level = 'word'
def __init__(self, feature):
super().__init__(feature)
@classmethod
def get_output_dtype(cls):
return tf.int32
def get_output_shape(self):
return self.max_sequence_length,
def overall_statistics_metadata(self):
return {'level': self.level}
@staticmethod
def update_config_with_metadata(
output_feature,
feature_metadata,
*args,
**kwargs
):
output_feature['num_classes'] = feature_metadata[
'{}_vocab_size'.format(output_feature['level'])
]
output_feature['max_sequence_length'] = feature_metadata[
'{}_max_sequence_length'.format(output_feature['level'])
]
if isinstance(output_feature[LOSS]['class_weights'], (list, tuple)):
# [0, 0] for UNK and PAD
output_feature[LOSS]['class_weights'] = (
[0, 0] + output_feature[LOSS]['class_weights']
)
if (len(output_feature[LOSS]['class_weights']) !=
output_feature['num_classes']):
raise ValueError(
'The length of class_weights ({}) is not compatible with '
'the number of classes ({})'.format(
len(output_feature[LOSS]['class_weights']),
output_feature['num_classes']
)
)
if output_feature[LOSS]['class_similarities_temperature'] > 0:
if 'class_similarities' in output_feature:
distances = output_feature['class_similarities']
temperature = output_feature[LOSS][
'class_similarities_temperature']
for i in range(len(distances)):
distances[i, :] = softmax(
distances[i, :],
temperature=temperature
)
output_feature[LOSS]['class_similarities'] = distances
else:
raise ValueError(
'class_similarities_temperature > 0,'
'but no class similarities are provided '
'for feature {}'.format(output_feature[COLUMN])
)
if output_feature[LOSS][TYPE] == 'sampled_softmax_cross_entropy':
level_str2freq = '{}_str2freq'.format(output_feature['level'])
level_idx2str = '{}_idx2str'.format(output_feature['level'])
output_feature[LOSS]['class_counts'] = [
feature_metadata[level_str2freq][cls]
for cls in feature_metadata[level_idx2str]
]
@staticmethod
def calculate_overall_stats(
predictions,
targets,
train_set_metadata,
):
overall_stats = {}
level_idx2str = '{}_{}'.format(train_set_metadata['level'], 'idx2str')
sequences = targets
last_elem_sequence = sequences[np.arange(sequences.shape[0]),
(sequences != 0).cumsum(1).argmax(1)]
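        # The indexing above works because (sequences != 0).cumsum(1)
        # reaches its maximum at the last nonzero position, and argmax
        # returns the first occurrence of that maximum, so it picks the
        # final real token of each padded row, e.g. [5, 3, 0, 0] -> 3.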
confusion_matrix = ConfusionMatrix(
last_elem_sequence,
predictions[LAST_PREDICTIONS],
labels=train_set_metadata[level_idx2str]
)
overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
overall_stats['overall_stats'] = confusion_matrix.stats()
overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()
return overall_stats
def postprocess_predictions(
self,
result,
metadata,
output_directory,
skip_save_unprocessed_output=False,
):
# todo: refactor to reuse SequenceOutputFeature.postprocess_predictions
postprocessed = {}
name = self.feature_name
level_idx2str = '{}_{}'.format(self.level, 'idx2str')
npy_filename = os.path.join(output_directory, '{}_{}.npy')
if PREDICTIONS in result and len(result[PREDICTIONS]) > 0:
preds = result[PREDICTIONS].numpy()
if level_idx2str in metadata:
postprocessed[PREDICTIONS] = [
[metadata[level_idx2str][token]
if token < len(
metadata[level_idx2str]) else UNKNOWN_SYMBOL
for token in pred]
for pred in preds
]
else:
postprocessed[PREDICTIONS] = preds
if not skip_save_unprocessed_output:
np.save(npy_filename.format(name, PREDICTIONS), preds)
del result[PREDICTIONS]
if LAST_PREDICTIONS in result and len(result[LAST_PREDICTIONS]) > 0:
last_preds = result[LAST_PREDICTIONS].numpy()
if level_idx2str in metadata:
postprocessed[LAST_PREDICTIONS] = [
metadata[level_idx2str][last_pred]
if last_pred < len(
metadata[level_idx2str]) else UNKNOWN_SYMBOL
for last_pred in last_preds
]
else:
postprocessed[LAST_PREDICTIONS] = last_preds
if not skip_save_unprocessed_output:
np.save(npy_filename.format(name, LAST_PREDICTIONS),
last_preds)
del result[LAST_PREDICTIONS]
if PROBABILITIES in result and len(result[PROBABILITIES]) > 0:
probs = result[PROBABILITIES]
if probs is not None:
probs = probs.numpy()
if len(probs) > 0 and isinstance(probs[0], list):
prob = []
for i in range(len(probs)):
for j in range(len(probs[i])):
probs[i][j] = np.max(probs[i][j])
prob.append(np.prod(probs[i]))
else:
probs = np.amax(probs, axis=-1)
prob = np.prod(probs, axis=-1)
# commenting probabilities out because usually it is huge:
# dataset x length x classes
# todo: add a mechanism for letting the user decide to save it
# postprocessed[PROBABILITIES] = probs
postprocessed[PROBABILITY] = prob
if not skip_save_unprocessed_output:
# commenting probabilities out, see comment above
# np.save(npy_filename.format(name, PROBABILITIES), probs)
np.save(npy_filename.format(name, PROBABILITY), prob)
del result[PROBABILITIES]
if LENGTHS in result:
del result[LENGTHS]
return postprocessed
@staticmethod
def populate_defaults(output_feature):
set_default_value(output_feature, 'level', 'word')
SequenceOutputFeature.populate_defaults(output_feature)
|
|
"""
@package mi.instrument.sunburst.sami2_pco2.pco2a.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2a/test/test_driver.py
@author Christopher Wingard
@brief Test cases for pco2a driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Pco2waConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.sunburst.sami2_pco2.pco2a.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='V7HE4T',
instrument_agent_name='sunburst_sami2_pco2_pco2a',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={
DriverStartupConfigKey.PARAMETERS: {
Parameter.BIT_SWITCHES: 0x01,
},
}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
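# A minimal sketch of the mixin pattern described above (hypothetical
# names, for illustration only):
#
#     class AssertHexMixin(object):
#         def assert_hex_equal(self, expected, actual):
#             assert int(expected, 16) == int(actual, 16)
#
#     class SomeDriverTest(AssertHexMixin, unittest.TestCase):
#         def test_value(self):
#             self.assert_hex_equal('0x0A', '0x0a')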
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C010038' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
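    # A sketch of how the leading fields of the string above line up with
    # the driver parameters below (offsets inferred from the values, not
    # from instrument documentation):
    #
    #     int(VALID_CONFIG_STRING[0:8], 16)    # 0xCEE90B00, launch time
    #     int(VALID_CONFIG_STRING[8:16], 16)   # 0x02C7EA00 s = 540 days
    #     int(VALID_CONFIG_STRING[16:24], 16)  # 0x01E13380 s = 365 days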
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00, REQUIRED: True},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00, REQUIRED: True},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380, REQUIRED: True},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A, REQUIRED: True},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04, REQUIRED: True},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02, REQUIRED: True},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0B, VALUE: 0x0B, REQUIRED: True},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x00, REQUIRED: True},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07, REQUIRED: True},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10, REQUIRED: True},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20, REQUIRED: True},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0xFF, VALUE: 0xFF, REQUIRED: True},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x54, VALUE: 0x54, REQUIRED: True},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x18, VALUE: 0x18, REQUIRED: True},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1C, VALUE: 0x1C, REQUIRED: True},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x38, VALUE: 0x38, REQUIRED: True},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x38, VALUE: 3600, REQUIRED: True},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
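    # The VALUE entries above mirror the fields of VALID_CONFIG_STRING: for
    # example, LAUNCH_TIME (0xCEE90B00) is its first 8 hex digits and
    # START_TIME_FROM_LAUNCH (0x02C7EA00) the next 8.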
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2waConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2waConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2waConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
}
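    # Note: MODE_BITS 0x0A (0b00001010) is consistent with the schedule flags
    # above -- only the SAMI sample schedule and slot 1 independent schedule
    # bits are set (bit positions inferred from the particle values).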
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
def assert_particle_sami_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particles (Type 4 and 5). Used in INT test where type doesn't matter.
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_A_SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 4)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_A_SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
"""
Verify sami_blank_sample particle (Type 5)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_blank_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL)
self.assert_data_particle_parameters(data_particle,
self._sami_blank_sample_parameters,
verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: Pco2wConfigurationDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2waConfigurationDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_A_CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data       #
# handling. The tests generally focus on small segments of code, like a      #
# single function call, but can exercise more complex code by using Mock     #
# objects. However, if you find yourself mocking too much, it may be better  #
# written as an integration test.                                            #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
capabilities_test_dict = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
'DRIVER_EVENT_REAGENT_FLUSH',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
'DRIVER_EVENT_REAGENT_FLUSH_100ML'],
ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
'DRIVER_EVENT_STOP_DIRECT'],
ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
}
def test_base_driver_enums(self):
"""
Verify that all the SAMI Instrument driver enumerations have no
duplicate values that might cause confusion. Also do a little
        extra validation for the Capabilities.
        Extra enumeration tests are done in a specific subclass.
"""
# Test Enums defined in the base SAMI driver
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
        # Test capabilities for duplicates, then verify that the capabilities
        # are a subset of the protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_driver_enums(self):
"""
        Verify that all driver enumerations have no duplicate values that might
cause confusion.
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
for part in [self.VALID_STATUS_MESSAGE, self.VALID_R0_BLANK_SAMPLE,
self.VALID_R0_DATA_SAMPLE, self.VALID_CONFIG_STRING]:
self.assert_chunker_sample(chunker, part)
self.assert_chunker_sample_with_noise(chunker, part)
self.assert_chunker_fragmented_sample(chunker, part)
self.assert_chunker_combined_sample(chunker, part)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the
correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
self.assert_particle_regular_status, True)
self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
self.assert_particle_sami_blank_sample, True)
self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
self.assert_particle_sami_data_sample, True)
self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
self.assert_particle_configuration, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities. Iterate through available
capabilities, and verify that they can pass successfully through the
filter. Test silly made up capabilities to verify they are blocked by
filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in
this dict must also be defined in the protocol FSM. Note, the EXIT and
ENTER DRIVER_EVENTS don't need to be listed here.
"""
# capabilities defined in base class test_driver.
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self.capabilities_test_dict)
def test_pump_commands(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_commands(driver)
def test_pump_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_timing(driver)
def test_waiting_discover(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_waiting_discover(driver)
def test_autosample_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration tests exercise the direct driver / instrument interaction      #
# by making direct calls via zeromq.                                          #
# - Common integration tests exercise the driver through the instrument      #
#   agent and are common to all drivers (minimum requirement for ION         #
#   ingestion)                                                                #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
"""
Integration Tests:
test_startup_params: Verify that driver startup parameters are set properly.
test_set: In command state, test configuration particle generation.
Parameter.PUMP_PULSE
Parameter.PUMP_DURATION
Parameter.SAMPLES_PER_MEASUREMENT
Parameter.CYCLES_BETWEEN_BLANKS
Parameter.NUMBER_REAGENT_CYCLES
Parameter.NUMBER_BLANK_CYCLES
Parameter.FLUSH_PUMP_INTERVAL
Parameter.BIT_SWITCHES
Parameter.NUMBER_EXTRA_PUMP_CYCLES
Parameter.AUTO_SAMPLE_INTERVAL
Negative Set Tests:
START_TIME_FROM_LAUNCH
STOP_TIME_FROM_START
MODE_BITS
SAMI_SAMPLE_INTERVAL
test_commands: In autosample and command states, test particle generation.
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
test_autosample: Test autosample particle generation.
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
test_flush_pump: Test flush pump commands
"""
# def test_initialize_driver(self):
# self.assert_initialize_driver()
def test_startup_params(self):
startup_values = {
Parameter.PUMP_PULSE: 0x10,
Parameter.PUMP_DURATION: 0x20,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
Parameter.NUMBER_REAGENT_CYCLES: 0x18,
Parameter.NUMBER_BLANK_CYCLES: 0x1C,
Parameter.FLUSH_PUMP_INTERVAL: 0x01,
Parameter.BIT_SWITCHES: 0x01,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
Parameter.PUMP_100ML_CYCLES: 1
}
new_values = {
Parameter.PUMP_PULSE: 0x11,
Parameter.PUMP_DURATION: 0x21,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
Parameter.NUMBER_REAGENT_CYCLES: 0x19,
Parameter.NUMBER_BLANK_CYCLES: 0x1D,
Parameter.FLUSH_PUMP_INTERVAL: 0x02,
Parameter.BIT_SWITCHES: 0x02,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x01,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
self.assert_set(Parameter.PUMP_PULSE, 20)
self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
self.assert_set(Parameter.BIT_SWITCHES, 1)
self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.CYCLES_BETWEEN_BLANKS: 7,
Parameter.PUMP_PULSE: 20,
Parameter.SAMPLES_PER_MEASUREMENT: 239,
Parameter.NUMBER_REAGENT_CYCLES: 26,
Parameter.NUMBER_BLANK_CYCLES: 30,
Parameter.FLUSH_PUMP_INTERVAL: 2,
Parameter.BIT_SWITCHES: 1,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
Parameter.REAGENT_FLUSH_DURATION: 4,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, value=7.0)
self.assert_set_exception(Parameter.PUMP_PULSE, value=20.0)
self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
self.assert_set_exception(Parameter.BIT_SWITCHES, 1.0)
self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)
def test_acquire_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=160)
def test_acquire_blank_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=160)
def test_auto_sample(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 60)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=320)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
self.clear_events()
#Now verify that no more particles get generated
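        # A flag is used rather than calling self.fail() inside the try block,
        # because self.fail() raises AssertionError and would be swallowed by
        # the except clause below.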
failed = False
try:
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
failed = True
except AssertionError:
pass
self.assertFalse(failed)
#Restart autosample
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=320)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=160)
def test_polled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=160)
def test_scheduled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=160)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=160)
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=160)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_queued_command(self):
"""
        Verify a status request is queued while a sample is being taken.
"""
self.assert_initialize_driver()
## Queue status
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=180)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
"""
        Verify a status request is queued while autosample samples are being taken.
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
## Queue sample and status
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=180)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
self.assert_initialize_driver()
self.clear_events()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.PCO2W_A_REGULAR_STATUS,
self.assert_particle_regular_status)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_CONFIGURATION,
self.assert_particle_configuration)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ION     #
# integration. They generally aren't used for instrument debugging and       #
# should be tackled after all unit and integration tests are complete.       #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
def test_queued_command(self):
self.assert_enter_command_mode()
self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4, resource_state=ProtocolState.POLLED_SAMPLE)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=60)
def test_queued_autosample(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=200)
self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4,
resource_state=ProtocolState.SCHEDULED_SAMPLE)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=60)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
@unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
def test_overnight(self):
"""
Verify autosample at default rate
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_blank_sample,
DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_A_SAMI_SAMPLE,
timeout=14400)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly
supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
configuration_string = 'CF889C9C02C7EA0001E1338002000E10040200000000000000000000000000000000000000000' + \
'71020FFA8181C0100380000000000000000000000000000000000000000000000000000000000' + \
'00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
'0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
        # Load a new configuration string (modified from the default configuration)
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(1)
self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
time.sleep(1)
# Check that configuration was changed
self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
return_value = self.tcp_client.expect(configuration_string)
self.assertTrue(return_value)
###
# Add instrument specific code here.
###
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
def test_command_poll(self):
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, sample_count=1, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_A_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_A_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample_poll(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, sample_count=1, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_A_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_A_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 60)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_A_SAMI_SAMPLE)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
ProtocolEvent.DEIONIZED_WATER_FLUSH,
ProtocolEvent.REAGENT_FLUSH,
ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
ProtocolEvent.REAGENT_FLUSH_100ML
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE
]
self.assert_start_autosample(timeout=200)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class responderpolicy_responderpolicylabel_binding(base_resource) :
""" Binding class showing the responderpolicylabel that can be bound to responderpolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def name(self) :
"""Name of the responder policy for which to display settings.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the responder policy for which to display settings.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(responderpolicy_responderpolicylabel_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.responderpolicy_responderpolicylabel_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch responderpolicy_responderpolicylabel_binding resources.
"""
try :
obj = responderpolicy_responderpolicylabel_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of responderpolicy_responderpolicylabel_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy_responderpolicylabel_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count responderpolicy_responderpolicylabel_binding resources configued on NetScaler.
"""
try :
obj = responderpolicy_responderpolicylabel_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of responderpolicy_responderpolicylabel_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy_responderpolicylabel_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class responderpolicy_responderpolicylabel_binding_response(base_response) :
def __init__(self, length=1) :
self.responderpolicy_responderpolicylabel_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.responderpolicy_responderpolicylabel_binding = [responderpolicy_responderpolicylabel_binding() for _ in range(length)]
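# Illustrative usage (a sketch; assumes an authenticated nitro_service
# instance named 'client'):
#   bindings = responderpolicy_responderpolicylabel_binding.get(client, "mypolicy")
#   total = responderpolicy_responderpolicylabel_binding.count(client, "mypolicy")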
|
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from opentracing import (
InvalidCarrierException,
SpanContextCorruptedException,
)
from .constants import (
BAGGAGE_HEADER_KEY,
BAGGAGE_HEADER_PREFIX,
DEBUG_ID_HEADER_KEY,
TRACE_ID_HEADER,
)
from .span_context import SpanContext
from .constants import SAMPLED_FLAG, DEBUG_FLAG
import six
from six.moves import urllib_parse
class Codec(object):
def inject(self, span_context, carrier):
raise NotImplementedError()
def extract(self, carrier):
raise NotImplementedError()
class TextCodec(Codec):
def __init__(self,
url_encoding=False,
trace_id_header=TRACE_ID_HEADER,
baggage_header_prefix=BAGGAGE_HEADER_PREFIX,
debug_id_header=DEBUG_ID_HEADER_KEY,
baggage_header=BAGGAGE_HEADER_KEY):
self.url_encoding = url_encoding
self.trace_id_header = trace_id_header.lower().replace('_', '-')
self.baggage_prefix = baggage_header_prefix.lower().replace('_', '-')
self.debug_id_header = debug_id_header.lower().replace('_', '-')
self.baggage_header = baggage_header
self.prefix_length = len(baggage_header_prefix)
def inject(self, span_context, carrier):
if not isinstance(carrier, dict):
raise InvalidCarrierException('carrier not a collection')
# Note: we do not url-encode the trace ID because the ':' separator
# is not a problem for HTTP header values
carrier[self.trace_id_header] = span_context_to_string(
trace_id=span_context.trace_id, span_id=span_context.span_id,
parent_id=span_context.parent_id, flags=span_context.flags)
baggage = span_context.baggage
if baggage:
for key, value in six.iteritems(baggage):
encoded_key = key
if self.url_encoding:
if six.PY2 and isinstance(value, six.text_type):
encoded_value = urllib_parse.quote(value.encode('utf-8'))
else:
encoded_value = urllib_parse.quote(value)
# we assume that self.url_encoding means we are injecting
# into HTTP headers. httplib does not like unicode strings
# so we convert the key to utf-8. The URL-encoded value is
# already a plain string.
if six.PY2 and isinstance(key, six.text_type):
encoded_key = key.encode('utf-8')
else:
if six.PY3 and isinstance(value, six.binary_type):
encoded_value = str(value, 'utf-8')
else:
encoded_value = value
if six.PY3 and isinstance(key, six.binary_type):
encoded_key = str(key, 'utf-8')
# Leave the below print(), you will thank me next time you debug unicode strings
# print('adding baggage', key, '=>', value, 'as', encoded_key, '=>', encoded_value)
header_key = '%s%s' % (self.baggage_prefix, encoded_key)
carrier[header_key] = encoded_value
def extract(self, carrier):
if not hasattr(carrier, 'items'):
raise InvalidCarrierException('carrier not a collection')
trace_id, span_id, parent_id, flags = None, None, None, None
baggage = None
debug_id = None
for key, value in six.iteritems(carrier):
uc_key = key.lower()
if uc_key == self.trace_id_header:
if self.url_encoding:
value = urllib_parse.unquote(value)
trace_id, span_id, parent_id, flags = \
span_context_from_string(value)
elif uc_key.startswith(self.baggage_prefix):
if self.url_encoding:
value = urllib_parse.unquote(value)
attr_key = key[self.prefix_length:]
if baggage is None:
baggage = {attr_key.lower(): value}
else:
baggage[attr_key.lower()] = value
elif uc_key == self.debug_id_header:
if self.url_encoding:
value = urllib_parse.unquote(value)
debug_id = value
elif uc_key == self.baggage_header:
if self.url_encoding:
value = urllib_parse.unquote(value)
baggage = self._parse_baggage_header(value, baggage)
if not trace_id or not span_id:
# reset all IDs
trace_id, span_id, parent_id, flags = None, None, None, None
if not trace_id and not debug_id and not baggage:
return None
return SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=baggage, debug_id=debug_id)
def _parse_baggage_header(self, header, baggage):
for part in header.split(','):
kv = part.strip().split('=')
if len(kv) == 2:
if not baggage:
baggage = {}
baggage[kv[0]] = kv[1]
return baggage
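# Illustrative round trip through TextCodec (a sketch; the carrier key is
# whatever the default TRACE_ID_HEADER constant lowercases to):
#   codec = TextCodec()
#   carrier = {}
#   codec.inject(SpanContext(trace_id=1, span_id=2, parent_id=None, flags=1),
#                carrier)
#   # carrier now maps the trace-id header to '1:2:0:1'
#   assert codec.extract(carrier).trace_id == 1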
class BinaryCodec(Codec):
"""
BinaryCodec is a no-op.
"""
def inject(self, span_context, carrier):
if not isinstance(carrier, bytearray):
raise InvalidCarrierException('carrier not a bytearray')
pass # TODO binary encoding not implemented
def extract(self, carrier):
if not isinstance(carrier, bytearray):
raise InvalidCarrierException('carrier not a bytearray')
# TODO binary encoding not implemented
return None
def span_context_to_string(trace_id, span_id, parent_id, flags):
"""
    Serialize a span context to a string of the form
{trace_id}:{span_id}:{parent_id}:{flags}
Numbers are encoded as variable-length lower-case hex strings.
If parent_id is None, it is written as 0.
:param trace_id:
:param span_id:
:param parent_id:
:param flags:
"""
parent_id = parent_id or 0
return '{:x}:{:x}:{:x}:{:x}'.format(trace_id, span_id, parent_id, flags)
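# For example, span_context_to_string(256, 127, None, 1) returns '100:7f:0:1',
# and span_context_from_string('100:7f:0:1') yields (256, 127, None, 1) --
# a zero parent_id is normalized back to None.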
def span_context_from_string(value):
"""
    Decode a span context from a string.
    Raises SpanContextCorruptedException if the string value is malformed.
:param value: formatted {trace_id}:{span_id}:{parent_id}:{flags}
"""
if type(value) is list and len(value) > 0:
# sometimes headers are presented as arrays of values
if len(value) > 1:
raise SpanContextCorruptedException(
'trace context must be a string or array of 1: "%s"' % value)
value = value[0]
if not isinstance(value, six.string_types):
raise SpanContextCorruptedException(
'trace context not a string "%s"' % value)
parts = value.split(':')
if len(parts) != 4:
raise SpanContextCorruptedException(
'malformed trace context "%s"' % value)
try:
trace_id = int(parts[0], 16)
span_id = int(parts[1], 16)
parent_id = int(parts[2], 16)
flags = int(parts[3], 16)
if trace_id < 1 or span_id < 1 or parent_id < 0 or flags < 0:
raise SpanContextCorruptedException(
'malformed trace context "%s"' % value)
if parent_id == 0:
parent_id = None
return trace_id, span_id, parent_id, flags
except ValueError as e:
raise SpanContextCorruptedException(
'malformed trace context "%s": %s' % (value, e))
# String constants identifying the interop format.
ZipkinSpanFormat = 'zipkin-span-format'
class ZipkinCodec(Codec):
"""
ZipkinCodec handles ZipkinSpanFormat, which is an interop format
used by TChannel.
"""
def inject(self, span_context, carrier):
if not isinstance(carrier, dict):
raise InvalidCarrierException('carrier not a dictionary')
carrier['trace_id'] = span_context.trace_id
carrier['span_id'] = span_context.span_id
carrier['parent_id'] = span_context.parent_id
carrier['traceflags'] = span_context.flags
def extract(self, carrier):
if isinstance(carrier, dict):
trace_id = carrier.get('trace_id')
span_id = carrier.get('span_id')
parent_id = carrier.get('parent_id')
flags = carrier.get('traceflags')
else:
if hasattr(carrier, 'trace_id'):
trace_id = getattr(carrier, 'trace_id')
else:
raise InvalidCarrierException('carrier has no trace_id')
if hasattr(carrier, 'span_id'):
span_id = getattr(carrier, 'span_id')
else:
raise InvalidCarrierException('carrier has no span_id')
if hasattr(carrier, 'parent_id'):
parent_id = getattr(carrier, 'parent_id')
else:
raise InvalidCarrierException('carrier has no parent_id')
if hasattr(carrier, 'traceflags'):
flags = getattr(carrier, 'traceflags')
else:
raise InvalidCarrierException('carrier has no traceflags')
if not trace_id:
return None
return SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=None)
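# Illustrative ZipkinCodec carrier (a sketch): extract({'trace_id': 42,
# 'span_id': 1, 'parent_id': None, 'traceflags': 1}) returns a SpanContext
# with trace_id=42; note this interop format carries numeric IDs rather than
# hex strings.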
def header_to_hex(header):
if not isinstance(header, (str, six.text_type)):
raise SpanContextCorruptedException(
'malformed trace context "%s", expected hex string' % header)
try:
return int(header, 16)
except ValueError:
raise SpanContextCorruptedException(
'malformed trace context "%s", expected hex string' % header)
class B3Codec(Codec):
"""
Support B3 header properties
https://github.com/openzipkin/b3-propagation
"""
trace_header = 'X-B3-TraceId'
_trace_header_lc = trace_header.lower()
span_header = 'X-B3-SpanId'
_span_header_lc = span_header.lower()
parent_span_header = 'X-B3-ParentSpanId'
_parent_span_header_lc = parent_span_header.lower()
sampled_header = 'X-B3-Sampled'
_sampled_header_lc = sampled_header.lower()
flags_header = 'X-B3-Flags'
_flags_header_lc = flags_header.lower()
def inject(self, span_context, carrier):
if not isinstance(carrier, dict):
raise InvalidCarrierException('carrier not a dictionary')
carrier[self.trace_header] = format(span_context.trace_id, 'x').zfill(16)
carrier[self.span_header] = format(span_context.span_id, 'x').zfill(16)
if span_context.parent_id is not None:
carrier[self.parent_span_header] = format(span_context.parent_id, 'x').zfill(16)
if span_context.flags & DEBUG_FLAG == DEBUG_FLAG:
carrier[self.flags_header] = '1'
elif span_context.flags & SAMPLED_FLAG == SAMPLED_FLAG:
carrier[self.sampled_header] = '1'
def extract(self, carrier):
if not isinstance(carrier, dict):
raise InvalidCarrierException('carrier not a dictionary')
trace_id = span_id = parent_id = None
flags = 0x00
for header_key, header_value in six.iteritems(carrier):
if header_value is None:
continue
lower_key = header_key.lower()
if lower_key == self._trace_header_lc:
trace_id = header_to_hex(header_value)
elif lower_key == self._span_header_lc:
span_id = header_to_hex(header_value)
elif lower_key == self._parent_span_header_lc:
parent_id = header_to_hex(header_value)
elif lower_key == self._sampled_header_lc and header_value == '1':
flags |= SAMPLED_FLAG
elif lower_key == self._flags_header_lc and header_value == '1':
flags |= DEBUG_FLAG
if not trace_id or not span_id:
return None
return SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=None)
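# Illustrative B3 carrier (a sketch): extract({'X-B3-TraceId':
# '00000000000000ff', 'X-B3-SpanId': '0000000000000001', 'X-B3-Sampled': '1'})
# yields a SpanContext with trace_id=0xff, span_id=0x1, parent_id=None and
# SAMPLED_FLAG set.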
|
|
from __future__ import print_function
from opencanary.modules import CanaryService
from opencanary.config import PY3
import twisted
from twisted.cred import portal, checkers, credentials, error
from twisted.conch import error, avatar, interfaces as conchinterfaces
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.ssh import factory, userauth, connection, keys, session, transport
from twisted.conch.openssh_compat import primes
from twisted.conch.ssh.common import MP
from twisted.internet import reactor, protocol, defer
from twisted.application import internet
from zope.interface import implementer
import sys, os, time
import base64, struct
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, rsa
SSH_PATH="/var/tmp"
#pulled from Kippo
from twisted.conch.ssh.common import NS, getNS
class HoneyPotSSHUserAuthServer(userauth.SSHUserAuthServer):
def serviceStarted(self):
userauth.SSHUserAuthServer.serviceStarted(self)
us = self.transport.getHost()
peer = self.transport.getPeer()
logdata = {'LOCALVERSION': self.transport.ourVersionString, 'REMOTEVERSION': self.transport.otherVersionString}
logtype = self.transport.factory.canaryservice.logger.LOG_SSH_REMOTE_VERSION_SENT
log = self.transport.factory.canaryservice.log
log(logdata,
logtype=logtype,
src_host=peer.address.host,
src_port=peer.address.port,
dst_host=us.address.host,
dst_port=us.address.port
)
self.bannerSent = False
def sendBanner(self):
if self.bannerSent:
return
data = ''
data = '\r\n'.join(data.splitlines() + [''])
self.transport.sendPacket(
userauth.MSG_USERAUTH_BANNER, NS(data) + NS('en'))
self.bannerSent = True
def auth_password(self, packet):
"""
Password authentication. Payload::
string password
Make a UsernamePassword credential and verify it with our portal.
"""
password = getNS(packet[1:])[0]
c = credentials.UsernamePassword(self.user, password)
us = self.transport.getHost()
peer = self.transport.getPeer()
logdata = {'USERNAME': self.user, 'PASSWORD': password, 'LOCALVERSION': self.transport.ourVersionString, 'REMOTEVERSION': self.transport.otherVersionString}
logtype = self.transport.factory.canaryservice.logger.LOG_SSH_LOGIN_ATTEMPT
log = self.transport.factory.canaryservice.log
log(logdata,
logtype=logtype,
src_host=peer.address.host,
src_port=peer.address.port,
dst_host=us.address.host,
dst_port=us.address.port)
return self.portal.login(c, None, conchinterfaces.IConchUser).addErrback(
self._ebPassword)
def auth_publickey(self, packet):
try:
#extract the public key blob from the SSH packet
key_blob = getNS(getNS(packet[1:])[1])[0]
        except Exception:
key_blob = "No public key found."
try:
#convert blob into openssh key format
key = keys.Key.fromString(key_blob).toString('openssh')
        except Exception:
if not PY3:
key = "Invalid SSH Public Key Submitted: {key_blob}".format(key_blob=key_blob.encode('hex'))
else:
key = "Invalid SSH Public Key Submitted: {key_blob}".format(key_blob=key_blob.hex())
for keytype in [b'ecdsa-sha2-nistp256',b'ecdsa-sha2-nistp384',b'ecdsa-sha2-nistp521',b'ssh-ed25519']:
if keytype in key_blob:
                key = '{keytype} {keydata}'.format(
                    keytype=keytype.decode('ascii'),
                    keydata=base64.b64encode(key_blob).decode('ascii'))
print('Key was {key}'.format(key=key))
        c = credentials.SSHPrivateKey(None, None, None, None, None)
#self.log(key=key)
return self.portal.login(c, None, conchinterfaces.IConchUser).addErrback(
self._ebPassword)
def ssh_USERAUTH_REQUEST(self, packet):
self.sendBanner()
return userauth.SSHUserAuthServer.ssh_USERAUTH_REQUEST(self, packet)
# As implemented by Kojoney
class HoneyPotSSHFactory(factory.SSHFactory):
services = {
b'ssh-userauth': HoneyPotSSHUserAuthServer,
b'ssh-connection': connection.SSHConnection,
}
# Special delivery to the loggers to avoid scope problems
def logDispatch(self, sessionid, msg):
data = {}
data['logdata'] = msg
self.logger.log(data)
#for dblog in self.dbloggers:
# dblog.logDispatch(sessionid, msg)
def __init__(self, logger=None, version=None):
        # protocol (or whatever they are) instances are kept here for the interact feature
self.sessions = {}
self.logger = logger
self.version = version
def buildProtocol(self, addr):
# FIXME: try to mimic something real 100%
t = HoneyPotTransport()
_modulis = '/etc/ssh/moduli', '/private/etc/moduli'
if self.version:
t.ourVersionString = self.version
else:
            t.ourVersionString = b'empty'
t.supportedPublicKeys = self.privateKeys.keys()
for _moduli in _modulis:
try:
self.primes = primes.parseModuliFile(_moduli)
break
except IOError:
pass
if not self.primes:
ske = t.supportedKeyExchanges[:]
if 'diffie-hellman-group-exchange-sha1' in ske:
ske.remove('diffie-hellman-group-exchange-sha1')
t.supportedKeyExchanges = ske
t.factory = self
return t
@implementer(portal.IRealm)
class HoneyPotRealm:
def __init__(self):
pass
def requestAvatar(self, avatarId, mind, *interfaces):
        if conchinterfaces.IConchUser in interfaces:
            # Note: self.env is never assigned in __init__; in practice this
            # path is unreachable because every registered checker fails the
            # login before an avatar is requested.
            return interfaces[0], \
                HoneyPotAvatar(avatarId, self.env), lambda: None
else:
raise Exception("No supported interfaces found.")
class HoneyPotTransport(transport.SSHServerTransport):
hadVersion = False
def connectionMade(self):
logdata = {'SESSION': str(self.transport.sessionno)}
logtype = self.factory.canaryservice.logger.LOG_SSH_NEW_CONNECTION
log = self.factory.canaryservice.log
log(logdata, transport=self.transport, logtype=logtype)
self.interactors = []
self.logintime = time.time()
self.ttylog_open = False
transport.SSHServerTransport.connectionMade(self)
def sendKexInit(self):
# Don't send key exchange prematurely
if not self.gotVersion:
return
transport.SSHServerTransport.sendKexInit(self)
def dataReceived(self, data):
transport.SSHServerTransport.dataReceived(self, data)
# later versions seem to call sendKexInit again on their own
isLibssh = data.find(b'libssh', data.find(b'SSH-')) != -1
if (twisted.version.major < 11 or isLibssh) and \
not self.hadVersion and self.gotVersion:
self.sendKexInit()
self.hadVersion = True
def ssh_KEXINIT(self, packet):
#print('Remote SSH version: %s' % (self.otherVersionString,))
return transport.SSHServerTransport.ssh_KEXINIT(self, packet)
def ssh_KEX_DH_GEX_REQUEST(self, packet):
MSG_KEX_DH_GEX_GROUP = 31
        # We have to override this method since the original will
        # pick the client's ideal DH group size. For some SSH clients, this
        # is 8192 bits, which takes minutes to compute. Instead, we pick the
        # minimum, which on our test client was 1024.
if self.ignoreNextPacket:
self.ignoreNextPacket = 0
return
self.dhGexRequest = packet
        gex_min, gex_ideal, gex_max = struct.unpack('>3L', packet)
        self.g, self.p = self.factory.getDHPrime(gex_min)
self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p) + MP(self.g))
def lastlogExit(self):
starttime = time.strftime('%a %b %d %H:%M',
time.localtime(self.logintime))
endtime = time.strftime('%H:%M',
time.localtime(time.time()))
duration = str((time.time() - self.logintime))
clientIP = self.transport.getPeer().host
#print('root\tpts/0\t%s\t%s - %s (%s)' % \
# (clientIP, starttime, endtime, duration))
# this seems to be the only reliable place of catching lost connection
def connectionLost(self, reason):
for i in self.interactors:
i.sessionClosed()
if self.transport.sessionno in self.factory.sessions:
del self.factory.sessions[self.transport.sessionno]
#self.lastlogExit()
        if self.ttylog_open:
            # Note: ttylog is not imported in this module; this branch only
            # runs if ttylog_open has been set by an interactive session.
ttylog.ttylog_close(self.ttylog_file, time.time())
self.ttylog_open = False
transport.SSHServerTransport.connectionLost(self, reason)
def sendDisconnect(self, reason, desc):
"""
Workaround for the "bad packet length" error message.
@param reason: the reason for the disconnect. Should be one of the
DISCONNECT_* values.
@type reason: C{int}
        @param desc: a description of the reason for the disconnection.
@type desc: C{str}
"""
        if 'bad packet length' not in desc.decode():
            # On Python 3 this could simply call super().
transport.SSHServerTransport.sendDisconnect(self, reason, desc)
else:
            self.transport.write(b'Protocol mismatch.\n')
log.msg('Disconnecting with error, code %s\nreason: %s' % \
(reason, desc))
self.transport.loseConnection()
class HoneyPotSSHSession(session.SSHSession):
def request_env(self, data):
#print('request_env: %s' % (repr(data)))
pass
@implementer(conchinterfaces.ISession)
class HoneyPotAvatar(avatar.ConchUser):
def __init__(self, username, env):
avatar.ConchUser.__init__(self)
self.username = username
self.env = env
        self.channelLookup.update({b'session': HoneyPotSSHSession})
def openShell(self, protocol):
return
def getPty(self, terminal, windowSize, attrs):
return None
def execCommand(self, protocol, cmd):
return
def closed(self):
pass
def eofReceived(self):
pass
def windowChanged(self, windowSize):
self.windowSize = windowSize
def getRSAKeys():
"""
    Check for existing RSA keys; if there are none, generate a 2048-bit
    RSA key pair, save it to a temporary location, and return the keys
    formatted as OpenSSH keys.
"""
public_key = os.path.join(SSH_PATH, 'id_rsa.pub')
private_key = os.path.join(SSH_PATH, 'id_rsa')
if not (os.path.exists(public_key) and os.path.exists(private_key)):
ssh_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
public_key_string = ssh_key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH)
private_key_string = ssh_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption())
with open(public_key, 'w+b') as key_file:
key_file.write(public_key_string)
with open(private_key, 'w+b') as key_file:
key_file.write(private_key_string)
else:
with open(public_key) as key_file:
public_key_string = key_file.read()
with open(private_key) as key_file:
private_key_string = key_file.read()
return public_key_string, private_key_string
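# Usage sketch: the key strings returned above are parsed into twisted Key
# objects further down in CanarySSH.getService(); a standalone equivalent
# (commented out to avoid generating keys at import time):
#
#   pub, priv = getRSAKeys()
#   host_public = keys.Key.fromString(data=pub)
#   host_private = keys.Key.fromString(data=priv)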
def getDSAKeys():
"""
    Check for existing DSA keys; if there are none, generate a 1024-bit
    DSA key pair, save it to a temporary location, and return the keys
    formatted as OpenSSH keys.
"""
public_key = os.path.join(SSH_PATH, 'id_dsa.pub')
private_key = os.path.join(SSH_PATH, 'id_dsa')
if not (os.path.exists(public_key) and os.path.exists(private_key)):
ssh_key = dsa.generate_private_key(
key_size=1024,
backend=default_backend())
public_key_string = ssh_key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH)
private_key_string = ssh_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption())
with open(public_key, 'w+b') as key_file:
key_file.write(public_key_string)
with open(private_key, 'w+b') as key_file:
key_file.write(private_key_string)
else:
with open(public_key) as key_file:
public_key_string = key_file.read()
with open(private_key) as key_file:
private_key_string = key_file.read()
return public_key_string, private_key_string
@implementer(checkers.ICredentialsChecker)
class HoneypotPasswordChecker:
credentialInterfaces = (credentials.IUsernamePassword,)
def __init__(self, logger=None):
self.logger = logger
self.auth_attempt = 0
def requestAvatarId(self, credentials):
return defer.fail(error.UnauthorizedLogin())
@implementer(checkers.ICredentialsChecker)
class CanaryPublicKeyChecker:
credentialInterfaces = (credentials.ISSHPrivateKey,)
def __init__(self, logger=None):
self.logger = logger
self.auth_attempt = 0
def requestAvatarId(self, credentials):
return defer.fail(error.UnauthorizedLogin())
class CanarySSH(CanaryService):
NAME = 'ssh'
    def __init__(self, config=None, logger=None):
CanaryService.__init__(self, config=config, logger=logger)
self.port = int(config.getVal("ssh.port", default=22))
self.version = config.getVal("ssh.version", default="SSH-2.0-OpenSSH_5.1p1 Debian-5").encode('utf8')
self.listen_addr = config.getVal('device.listen_addr', default='')
def getService(self):
factory = HoneyPotSSHFactory(version=self.version, logger=self.logger)
factory.canaryservice = self
factory.portal = portal.Portal(HoneyPotRealm())
rsa_pubKeyString, rsa_privKeyString = getRSAKeys()
dsa_pubKeyString, dsa_privKeyString = getDSAKeys()
factory.portal.registerChecker(HoneypotPasswordChecker(logger=factory.logger))
factory.portal.registerChecker(CanaryPublicKeyChecker(logger=factory.logger))
factory.publicKeys = {b'ssh-rsa': keys.Key.fromString(data=rsa_pubKeyString),
b'ssh-dss': keys.Key.fromString(data=dsa_pubKeyString)}
factory.privateKeys = {b'ssh-rsa': keys.Key.fromString(data=rsa_privKeyString),
b'ssh-dss': keys.Key.fromString(data=dsa_privKeyString)}
return internet.TCPServer(self.port, factory, interface=self.listen_addr)
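# Wiring sketch (commented out; the config object's getVal() API and the
# logger are supplied by the wider opencanary runtime and are assumed here):
#
#   canary = CanarySSH(config=config, logger=logger)
#   service = canary.getService()          # twisted TCPServer on ssh.port
#   service.setServiceParent(application)  # attach to a twisted Application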
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Coordinator.job_properties'
db.add_column('oozie_coordinator', 'job_properties', self.gf('django.db.models.fields.TextField')(default='[]'), keep_default=False)
try:
from oozie.models import Coordinator
Coordinator.objects.all().update(job_properties='[]')
except Exception as e:
import logging
            logging.warning(e)
def backwards(self, orm):
# Deleting field 'Coordinator.job_properties'
db.delete_column('oozie_coordinator', 'job_properties')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 16, 9, 46, 22, 231292)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 9, 46, 22, 231260)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 9, 46, 22, 232054)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
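# This migration is applied with South's migrate management command, e.g.:
#   ./manage.py migrate oozie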
|
|
import itertools
import re
from PIL import Image, ImageChops, ImageFilter
from easy_thumbnails import utils
def _compare_entropy(start_slice, end_slice, slice, difference):
"""
Calculate the entropy of two slices (from the start and end of an axis),
returning a tuple containing the amount that should be added to the start
and removed from the end of the axis.
"""
start_entropy = utils.image_entropy(start_slice)
end_entropy = utils.image_entropy(end_slice)
if end_entropy and abs(start_entropy / end_entropy - 1) < 0.01:
# Less than 1% difference, remove from both sides.
if difference >= slice * 2:
return slice, slice
half_slice = slice // 2
return half_slice, slice - half_slice
if start_entropy > end_entropy:
return 0, slice
else:
return slice, 0
def _points_table():
"""
Iterable to map a 16 bit grayscale image to 8 bits.
"""
for i in range(256):
for j in itertools.repeat(i, 256):
yield j
def colorspace(im, bw=False, replace_alpha=False, **kwargs):
"""
Convert images to the correct color space.
A passive option (i.e. always processed) of this method is that all images
(unless grayscale) are converted to RGB colorspace.
    This processor should be listed before :func:`scale_and_crop` so the
    palette is changed before the image is resized.
bw
Make the thumbnail grayscale (not really just black & white).
replace_alpha
Replace any transparency layer with a solid color. For example,
``replace_alpha='#fff'`` would replace the transparency layer with
white.
"""
if im.mode == 'I':
        # PIL (and Pillow) can't convert 16-bit grayscale images to lower
        # modes, so manually convert them to 8-bit grayscale first.
im = im.point(list(_points_table()), 'L')
is_transparent = utils.is_transparent(im)
is_grayscale = im.mode in ('L', 'LA')
new_mode = im.mode
if is_grayscale or bw:
new_mode = 'L'
else:
new_mode = 'RGB'
if is_transparent:
if replace_alpha:
if im.mode != 'RGBA':
im = im.convert('RGBA')
base = Image.new('RGBA', im.size, replace_alpha)
base.paste(im, mask=im)
im = base
else:
new_mode = new_mode + 'A'
if im.mode != new_mode:
im = im.convert(new_mode)
return im
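# Usage sketch (commented out; 'logo.png' is a hypothetical file): flatten a
# transparent image onto white and force an RGB/grayscale result.
#
#   logo = Image.open('logo.png')
#   logo = colorspace(logo, replace_alpha='#ffffff')
#   assert logo.mode in ('RGB', 'L')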
def autocrop(im, autocrop=False, **kwargs):
"""
Remove any unnecessary whitespace from the edges of the source image.
This processor should be listed before :func:`scale_and_crop` so the
whitespace is removed from the source image before it is resized.
autocrop
Activates the autocrop method for this image.
"""
if autocrop:
# If transparent, flatten.
if utils.is_transparent(im):
            no_alpha = Image.new('L', im.size, 255)
no_alpha.paste(im, mask=im.split()[-1])
else:
no_alpha = im.convert('L')
# Convert to black and white image.
bw = no_alpha.convert('L')
# bw = bw.filter(ImageFilter.MedianFilter)
# White background.
bg = Image.new('L', im.size, 255)
bbox = ImageChops.difference(bw, bg).getbbox()
if bbox:
im = im.crop(bbox)
return im
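# Usage sketch (commented out): trim the white border around a black block.
#
#   canvas = Image.new('L', (100, 100), 255)
#   canvas.paste(0, (40, 40, 60, 60))        # 20x20 black block in the middle
#   trimmed = autocrop(canvas, autocrop=True)
#   assert trimmed.size == (20, 20)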
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
**kwargs):
"""
Handle scaling and cropping the source image.
Images can be scaled / cropped against a single dimension by using zero
as the placeholder in the size. For example, ``size=(100, 0)`` will cause
the image to be resized to 100 pixels wide, keeping the aspect ratio of
the source image.
crop
Crop the source image height or width to exactly match the requested
thumbnail size (the default is to proportionally resize the source
image to fit within the requested thumbnail size).
By default, the image is centered before being cropped. To crop from
the edges, pass a comma separated string containing the ``x`` and ``y``
percentage offsets (negative values go from the right/bottom). Some
examples follow:
* ``crop="0,0"`` will crop from the left and top edges.
* ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)
and the bottom edge.
* ``crop=",0"`` will keep the default behavior for the x axis
(horizontally centering the image) and crop from the top edge.
The image can also be "smart cropped" by using ``crop="smart"``. The
image is incrementally cropped down to the requested size by removing
slices from edges with the least entropy.
Finally, you can use ``crop="scale"`` to simply scale the image so that
at least one dimension fits within the size dimensions given (you may
want to use the upscale option too).
upscale
Allow upscaling of the source image during scaling.
zoom
A percentage to zoom in on the scaled image. For example, a zoom of
``40`` will clip 20% off each side of the source image before
thumbnailing.
target
Set the focal point as a percentage for the image if it needs to be
cropped (defaults to ``(50, 50)``).
For example, ``target="10,20"`` will set the focal point as 10% and 20%
from the left and top of the image, respectively. If the image needs to
be cropped, it will trim off the right and bottom edges until the focal
point is centered.
Can either be set as a two-item tuple such as ``(20, 30)`` or a comma
separated string such as ``"20,10"``.
A null value such as ``(20, None)`` or ``",60"`` will default to 50%.
"""
source_x, source_y = [float(v) for v in im.size]
target_x, target_y = [int(v) for v in size]
if crop or not target_x or not target_y:
scale = max(target_x / source_x, target_y / source_y)
else:
scale = min(target_x / source_x, target_y / source_y)
# Handle one-dimensional targets.
if not target_x:
target_x = round(source_x * scale)
elif not target_y:
target_y = round(source_y * scale)
if zoom:
if not crop:
target_x = round(source_x * scale)
target_y = round(source_y * scale)
crop = True
scale *= (100 + int(zoom)) / 100.0
if scale < 1.0 or (scale > 1.0 and upscale):
# Resize the image to the target size boundary. Round the scaled
# boundary sizes to avoid floating point errors.
im = im.resize((int(round(source_x * scale)),
int(round(source_y * scale))),
resample=Image.ANTIALIAS)
if crop:
# Use integer values now.
source_x, source_y = im.size
# Difference between new image size and requested size.
diff_x = int(source_x - min(source_x, target_x))
diff_y = int(source_y - min(source_y, target_y))
if crop != 'scale' and (diff_x or diff_y):
if isinstance(target, str):
target = re.match(r'(\d+)?,(\d+)?$', target)
if target:
target = target.groups()
if target:
focal_point = [int(n) if (n or n == 0) else 50 for n in target]
else:
focal_point = 50, 50
# Crop around the focal point
halftarget_x, halftarget_y = int(target_x / 2), int(target_y / 2)
focal_point_x = int(source_x * focal_point[0] / 100)
focal_point_y = int(source_y * focal_point[1] / 100)
box = [
max(0, min(source_x - target_x, focal_point_x - halftarget_x)),
max(0, min(source_y - target_y, focal_point_y - halftarget_y)),
]
box.append(int(min(source_x, box[0] + target_x)))
box.append(int(min(source_y, box[1] + target_y)))
# See if an edge cropping argument was provided.
edge_crop = (isinstance(crop, str) and
re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
            # Use any() here: on Python 3 a filter object is always truthy.
            if edge_crop and any(edge_crop.groups()):
x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
if x_crop:
offset = min(int(target_x) * int(x_crop) // 100, diff_x)
if x_right:
box[0] = diff_x - offset
box[2] = source_x - offset
else:
box[0] = offset
box[2] = source_x - (diff_x - offset)
if y_crop:
offset = min(int(target_y) * int(y_crop) // 100, diff_y)
if y_bottom:
box[1] = diff_y - offset
box[3] = source_y - offset
else:
box[1] = offset
box[3] = source_y - (diff_y - offset)
# See if the image should be "smart cropped".
elif crop == 'smart':
left = top = 0
right, bottom = source_x, source_y
while diff_x:
slice = min(diff_x, max(diff_x // 5, 10))
start = im.crop((left, 0, left + slice, source_y))
end = im.crop((right - slice, 0, right, source_y))
add, remove = _compare_entropy(start, end, slice, diff_x)
left += add
right -= remove
diff_x = diff_x - add - remove
while diff_y:
slice = min(diff_y, max(diff_y // 5, 10))
start = im.crop((0, top, source_x, top + slice))
end = im.crop((0, bottom - slice, source_x, bottom))
add, remove = _compare_entropy(start, end, slice, diff_y)
top += add
bottom -= remove
diff_y = diff_y - add - remove
box = (left, top, right, bottom)
# Finally, crop the image!
im = im.crop(box)
return im
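# Usage sketch (commented out): the main forms of the crop argument.
#
#   scale_and_crop(im, (100, 100), crop=True)     # resize then center-crop
#   scale_and_crop(im, (100, 100), crop='0,0')    # crop from the top-left edges
#   scale_and_crop(im, (100, 100), crop='smart')  # entropy-based edge removal
#   scale_and_crop(im, (100, 100), crop='scale')  # scale only, no cropping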
def filters(im, detail=False, sharpen=False, **kwargs):
"""
Pass the source image through post-processing filters.
sharpen
Sharpen the thumbnail image (using the PIL sharpen filter)
detail
Add detail to the image, like a mild *sharpen* (using the PIL
``detail`` filter).
"""
if detail:
im = im.filter(ImageFilter.DETAIL)
if sharpen:
im = im.filter(ImageFilter.SHARPEN)
return im
def background(im, size, background=None, **kwargs):
"""
Add borders of a certain color to make the resized image fit exactly within
the dimensions given.
background
Background color to use
"""
if not background:
# Primary option not given, nothing to do.
return im
if not size[0] or not size[1]:
        # One of the dimensions isn't specified; nothing to do.
return im
x, y = im.size
if x >= size[0] and y >= size[1]:
# The image is already equal to (or larger than) the expected size, so
# there's nothing to do.
return im
im = colorspace(im, replace_alpha=background, **kwargs)
new_im = Image.new('RGB', size, background)
if new_im.mode != im.mode:
new_im = new_im.convert(im.mode)
offset = (size[0]-x)//2, (size[1]-y)//2
new_im.paste(im, offset)
return new_im
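if __name__ == '__main__':
    # Demonstration only (not part of the library): push a synthetic image
    # through the processors above in the order their docstrings recommend.
    # Assumes a Pillow version that still provides Image.ANTIALIAS.
    demo = Image.new('RGBA', (200, 120), (255, 0, 0, 128))
    demo = colorspace(demo, replace_alpha='#ffffff')  # flatten alpha onto white
    demo = scale_and_crop(demo, (80, 40), crop=True)  # resize and center-crop
    demo = filters(demo, sharpen=True)                # post-process
    demo = background(demo, (100, 100), background='#eeeeee')  # pad to 100x100
    print(demo.mode, demo.size)  # expected: RGB (100, 100)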
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
                help='Enables returning of the instance password by the'
                     ' relevant server API calls such as create, rebuild'
                     ' or rescue. If the hypervisor does not support'
                     ' password injection, the password returned will'
                     ' not be correct.'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
XML_WARNING = False
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
global XML_WARNING
if not XML_WARNING:
LOG.warn(_LW('XML support has been deprecated and may be removed '
'as early as the Juno release.'))
XML_WARNING = True
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request."""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6", "key_name",
"availability_zone", "min_count", "max_count"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
res_id = server_node.getAttribute('return_reservation_id')
if res_id:
server['return_reservation_id'] = \
strutils.bool_from_string(res_id)
scheduler_hints = self._extract_scheduler_hints(server_node)
if scheduler_hints:
server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
user_data_node = self.find_first_child_named(server_node, "user_data")
if user_data_node is not None:
server["user_data"] = self.extract_text(user_data_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
# NOTE(vish): this is not namespaced in json, so leave it without a
# namespace for now
block_device_mapping = self._extract_block_device_mapping(server_node)
if block_device_mapping is not None:
server["block_device_mapping"] = block_device_mapping
block_device_mapping_v2 = self._extract_block_device_mapping_v2(
server_node)
if block_device_mapping_v2 is not None:
server["block_device_mapping_v2"] = block_device_mapping_v2
# NOTE(vish): Support this incorrect version because it was in the code
# base for a while and we don't want to accidentally break
# anyone that might be using it.
auto_disk_config = server_node.getAttribute('auto_disk_config')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
config_drive = server_node.getAttribute('config_drive')
if config_drive:
server['config_drive'] = config_drive
return server
def _extract_block_device_mapping(self, server_node):
"""Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
mapping = {}
attributes = ["volume_id", "snapshot_id", "device_name",
"virtual_name", "volume_size"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = value
attributes = ["delete_on_termination", "no_device"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = strutils.bool_from_string(value)
block_device_mapping.append(mapping)
return block_device_mapping
else:
return None
def _extract_block_device_mapping_v2(self, server_node):
"""Marshal the new block_device_mappings."""
node = self.find_first_child_named(server_node,
"block_device_mapping_v2")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
block_device_mapping.append(
dict((attr, child.getAttribute(attr))
for attr in block_device.bdm_new_api_fields
if child.getAttribute(attr)))
return block_device_mapping
def _extract_scheduler_hints(self, server_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request."""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name = self.find_attribute_or_element(sg_node, 'name')
if name:
item["name"] = name
security_groups.append(item)
return security_groups
else:
return None
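# Illustrative sketch (commented out; the XML below is hand-written, not taken
# from the API reference): given a request body such as
#
#   <server name="demo" imageRef="img-1" flavorRef="1">
#     <metadata><meta key="role">web</meta></metadata>
#   </server>
#
# _extract_server() returns roughly:
#
#   {'name': 'demo', 'imageRef': 'img-1', 'flavorRef': '1',
#    'metadata': {'role': 'web'}}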
class ActionDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'createImage': self._action_create_image,
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_change_password(self, node):
if not node.hasAttribute("adminPass"):
raise AttributeError("No adminPass was specified in request")
return {"adminPass": node.getAttribute("adminPass")}
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
name = node.getAttribute("name")
if not name:
raise AttributeError("Name cannot be blank")
rebuild['name'] = name
if node.hasAttribute("auto_disk_config"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(node)
if personality is not None:
rebuild["personality"] = personality
if not node.hasAttribute("imageRef"):
raise AttributeError("No imageRef was specified in request")
rebuild["imageRef"] = node.getAttribute("imageRef")
if node.hasAttribute("adminPass"):
rebuild["adminPass"] = node.getAttribute("adminPass")
if node.hasAttribute("accessIPv4"):
rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
if node.hasAttribute("accessIPv6"):
rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
if node.hasAttribute("preserve_ephemeral"):
rebuild["preserve_ephemeral"] = strutils.bool_from_string(
node.getAttribute("preserve_ephemeral"), strict=True)
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavorRef"):
resize["flavorRef"] = node.getAttribute("flavorRef")
else:
raise AttributeError("No flavorRef was specified in request")
if node.hasAttribute("auto_disk_config"):
resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
resize['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
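# Illustrative sketch (commented out; hand-written payload): a reboot action body
#
#   <reboot type="SOFT"/>
#
# is dispatched by default() to _action_reboot() and comes back as
#
#   {'body': {'reboot': {'type': 'SOFT'}}}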
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request."""
dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
        # Build a list explicitly: on Python 3, filter() returns a lazy
        # object that is always truthy and not subscriptable.
        link = [l for l in robj.obj['server']['links']
                if l['rel'] == 'self']
        if link:
            robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
search_opts.pop('status', None)
        if 'status' in req.GET:
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If all_tenants is passed with 0 or false as the value, remove it
# from the search options. Passing all_tenants with no value at all
# still enables the feature.
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(six.text_type(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
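# Pagination: 'limit' caps the page size and 'marker' is the id of
# the last server on the previous page.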
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts,
limit=limit,
marker=marker,
want_objects=True)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = objects.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
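# Values are stripped before validation so that whitespace-only
# strings fail the minimum-length check.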
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
try:
request.port_id = network.get('port', None)
except ValueError:
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % network.get('port')
raise exc.HTTPBadRequest(explanation=msg)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument : port")
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if (not request.port_id and not
uuidutils.is_uuid_like(request.network_id)):
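# Tolerate bridge-style ids such as 'br-<uuid>' by re-checking
# the portion after the first hyphen.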
br_uuid = request.network_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % request.network_id
raise exc.HTTPBadRequest(explanation=msg)
# The fixed IP address is optional; when it is not provided, one of
# the network's available addresses is assigned.
try:
request.address = network.get('fixed_ip', None)
except ValueError:
msg = _("Invalid fixed IP address (%s)") % request.address
raise exc.HTTPBadRequest(explanation=msg)
if (request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
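# Whitespace is legal in transported base64, so strip it before
# matching against the strict alphabet above.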
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
if self.ext_mgr.is_loaded('os-config-drive'):
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
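# Collect requested security group names, defaulting to the
# 'default' group when none are named.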
if self.ext_mgr.is_loaded('os-security-groups'):
security_groups = server_dict.get('security_groups')
if security_groups is not None:
sg_names = [sg['name'] for sg in security_groups
if sg.get('name')]
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = None
if (self.ext_mgr.is_loaded('os-networks')
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
if not isinstance(requested_networks, list):
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
requested_networks = self._get_requested_networks(
requested_networks)
access_ip_v4 = server_dict.get('accessIPv4')
if access_ip_v4 is not None:
self._validate_access_ipv4(access_ip_v4)
access_ip_v6 = server_dict.get('accessIPv6')
if access_ip_v6 is not None:
self._validate_access_ipv6(access_ip_v6)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# Optional OpenStack extensions:
key_name = None
if self.ext_mgr.is_loaded('os-keypairs'):
key_name = server_dict.get('key_name')
user_data = None
if self.ext_mgr.is_loaded('os-user-data'):
user_data = server_dict.get('user_data')
self._validate_user_data(user_data)
availability_zone = None
if self.ext_mgr.is_loaded('os-availability-zone'):
availability_zone = server_dict.get('availability_zone')
block_device_mapping = None
block_device_mapping_v2 = None
legacy_bdm = True
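# legacy_bdm records which block-device-mapping dialect the caller
# used so the compute layer can interpret it correctly.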
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
# Consider the new data format for block device mapping
block_device_mapping_v2 = server_dict.get(
'block_device_mapping_v2', [])
# NOTE(ndipanov): Disable usage of both legacy and new
# block device format in the same request
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
try:
block_device_mapping_v2 = [
block_device.BlockDeviceDict.from_api(bdm_dict)
for bdm_dict in block_device_mapping_v2]
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
block_device_mapping = (block_device_mapping or
block_device_mapping_v2)
ret_resv_id = False
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = 1
max_count = 1
if self.ext_mgr.is_loaded('os-multiple-create'):
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count', 1)
max_count = server_dict.get('max_count', min_count)
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = {}
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
scheduler_hints = server_dict.get('scheduler_hints', {})
try:
_get_inst_type = flavors.get_flavor_by_flavor_id
inst_type = _get_inst_type(flavor_id, ctxt=context,
read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.NetworkNotFound,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
exception.InstanceUserDataTooLarge,
exception.InstanceUserDataMalformed) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
except exception.Invalid as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return wsgi.ResponseObject({'reservation_id': resv_id},
xml=ServerMultipleCreateTemplate)
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
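# A non-zero reclaim_instance_interval enables deferred deletion:
# soft-delete now and let the reclaim task reap the instance later.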
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# NOTE(yufang521247): an instance that has never been active cannot
# be soft-deleted, so call delete() instead to clean up the
# instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
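# Only the whitelisted fields below may be updated; hostId and
# personality are explicitly rejected further down.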
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = (
access_ipv4 and access_ipv4.strip() or None)
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = (
access_ipv6 and access_ipv6.strip() or None)
if 'auto_disk_config' in body['server']:
auto_disk_config = strutils.bool_from_string(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(ctxt, id,
want_objects=True)
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeDisk as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost,
exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
if not image_href:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if ('changePassword' not in body
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = self._get_server_admin_password(body['changePassword'])
server = self._get_server(context, req, id)
try:
self.compute_api.set_admin_password(context, server, password)
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
metadata.iteritems()
except AttributeError:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
body = body['rebuild']
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(body)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
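# Map public API attribute names onto the keyword arguments the
# compute layer expects.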
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
kwargs = {}
# take the preserve_ephemeral value into account only when the
# corresponding extension is active
if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
and 'preserve_ephemeral' in body):
kwargs['preserve_ephemeral'] = strutils.bool_from_string(
body['preserve_ephemeral'], strict=True)
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
if 'name' in body:
self._validate_server_name(body['name'])
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs.pop('files_to_inject')
files_to_inject = self._get_injected_files(personality)
else:
files_to_inject = None
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
files_to_inject=files_to_inject,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# The view builder does not include adminPass, so add it here when
# instance passwords are enabled.
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
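# Volume-backed instances are snapshotted through their volumes; if
# the instance was not booted from an image, reconstruct the image
# metadata from the root block device mapping.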
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
properties = bdms.root_metadata(
context, self.compute_api.image_api,
self.compute_api.volume_api)
image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
url_prefix = self._view_builder._update_glance_link_prefix(
req.application_url)
image_ref = os.path.join(url_prefix,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)