repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
frappe/frappe | frappe/website/doctype/web_page_view/web_page_view.py | 2 | 1276 | # Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe.model.document import Document
class WebPageView(Document):
pass
@frappe.whitelist(allow_guest=True)
def make_view_log(path, referrer=None, browser=None, version=None, url=None, user_tz=None):
if not is_tracking_enabled():
return
request_dict = frappe.request.__dict__
user_agent = request_dict.get("environ", {}).get("HTTP_USER_AGENT")
if referrer:
referrer = referrer.split("?")[0]
is_unique = True
if referrer.startswith(url):
is_unique = False
if path != "/" and path.startswith("/"):
path = path[1:]
view = frappe.new_doc("Web Page View")
view.path = path
view.referrer = referrer
view.browser = browser
view.browser_version = version
view.time_zone = user_tz
view.user_agent = user_agent
view.is_unique = is_unique
try:
if frappe.flags.read_only:
view.deferred_insert()
else:
view.insert(ignore_permissions=True)
except Exception:
if frappe.message_log:
frappe.message_log.pop()
@frappe.whitelist()
def get_page_view_count(path):
return frappe.db.count("Web Page View", filters={"path": path})
def is_tracking_enabled():
return frappe.db.get_single_value("Website Settings", "enable_view_tracking")
| mit | 29abd096a304c6e8a1ba0b2149931912 | 22.2 | 91 | 0.71395 | 3.067308 | false | false | false | false |
frappe/frappe | frappe/desk/page/activity/activity.py | 3 | 1931 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe.core.doctype.activity_log.feed import get_feed_match_conditions
from frappe.utils import cint
@frappe.whitelist()
def get_feed(start, page_length):
"""get feed"""
match_conditions_communication = get_feed_match_conditions(frappe.session.user, "Communication")
match_conditions_comment = get_feed_match_conditions(frappe.session.user, "Comment")
result = frappe.db.sql(
"""select X.*
from (select name, owner, modified, creation, seen, comment_type,
reference_doctype, reference_name, '' as link_doctype, '' as link_name, subject,
communication_type, communication_medium, content
from
`tabCommunication`
where
communication_type = 'Communication'
and communication_medium != 'Email'
and {match_conditions_communication}
UNION
select name, owner, modified, creation, '0', 'Updated',
reference_doctype, reference_name, link_doctype, link_name, subject,
'Comment', '', content
from
`tabActivity Log`
UNION
select name, owner, modified, creation, '0', comment_type,
reference_doctype, reference_name, link_doctype, link_name, '',
'Comment', '', content
from
`tabComment`
where
{match_conditions_comment}
) X
order by X.creation DESC
LIMIT %(page_length)s
OFFSET %(start)s""".format(
match_conditions_comment=match_conditions_comment,
match_conditions_communication=match_conditions_communication,
),
{"user": frappe.session.user, "start": cint(start), "page_length": cint(page_length)},
as_dict=True,
)
return result
@frappe.whitelist()
def get_heatmap_data():
return dict(
frappe.db.sql(
"""select unix_timestamp(date(creation)), count(name)
from `tabActivity Log`
where
date(creation) > subdate(curdate(), interval 1 year)
group by date(creation)
order by creation asc"""
)
)
| mit | fbab554ea65a8c26cfc2ef80ef905e56 | 28.707692 | 97 | 0.708441 | 3.352431 | false | false | false | false |
frappe/frappe | frappe/core/doctype/domain/domain.py | 3 | 3893 | # Copyright (c) 2017, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from frappe.model.document import Document
class Domain(Document):
"""Domain documents are created automatically when DocTypes
with "Restricted" domains are imported during
installation or migration"""
def setup_domain(self):
"""Setup domain icons, permissions, custom fields etc."""
self.setup_data()
self.setup_roles()
self.setup_properties()
self.set_values()
if not int(frappe.defaults.get_defaults().setup_complete or 0):
# if setup not complete, setup desktop etc.
self.setup_sidebar_items()
self.set_default_portal_role()
if self.data.custom_fields:
create_custom_fields(self.data.custom_fields)
if self.data.on_setup:
# custom on_setup method
frappe.get_attr(self.data.on_setup)()
def remove_domain(self):
"""Unset domain settings"""
self.setup_data()
if self.data.restricted_roles:
for role_name in self.data.restricted_roles:
if frappe.db.exists("Role", role_name):
role = frappe.get_doc("Role", role_name)
role.disabled = 1
role.save()
self.remove_custom_field()
def remove_custom_field(self):
"""Remove custom_fields when disabling domain"""
if self.data.custom_fields:
for doctype in self.data.custom_fields:
custom_fields = self.data.custom_fields[doctype]
# custom_fields can be a list or dict
if isinstance(custom_fields, dict):
custom_fields = [custom_fields]
for custom_field_detail in custom_fields:
custom_field_name = frappe.db.get_value(
"Custom Field", dict(dt=doctype, fieldname=custom_field_detail.get("fieldname"))
)
if custom_field_name:
frappe.delete_doc("Custom Field", custom_field_name)
def setup_roles(self):
"""Enable roles that are restricted to this domain"""
if self.data.restricted_roles:
user = frappe.get_doc("User", frappe.session.user)
for role_name in self.data.restricted_roles:
user.append("roles", {"role": role_name})
if not frappe.db.get_value("Role", role_name):
frappe.get_doc(dict(doctype="Role", role_name=role_name)).insert()
continue
role = frappe.get_doc("Role", role_name)
role.disabled = 0
role.save()
user.save()
def setup_data(self, domain=None):
"""Load domain info via hooks"""
self.data = frappe.get_domain_data(self.name)
def get_domain_data(self, module):
return frappe.get_attr(frappe.get_hooks("domains")[self.name] + ".data")
def set_default_portal_role(self):
"""Set default portal role based on domain"""
if self.data.get("default_portal_role"):
frappe.db.set_value(
"Portal Settings", None, "default_role", self.data.get("default_portal_role")
)
def setup_properties(self):
if self.data.properties:
for args in self.data.properties:
frappe.make_property_setter(args)
def set_values(self):
"""set values based on `data.set_value`"""
if self.data.set_value:
for args in self.data.set_value:
frappe.reload_doctype(args[0])
doc = frappe.get_doc(args[0], args[1] or args[0])
doc.set(args[2], args[3])
doc.save()
def setup_sidebar_items(self):
"""Enable / disable sidebar items"""
if self.data.allow_sidebar_items:
# disable all
frappe.db.sql("update `tabPortal Menu Item` set enabled=0")
# enable
frappe.db.sql(
"""update `tabPortal Menu Item` set enabled=1
where route in ({})""".format(
", ".join(f'"{d}"' for d in self.data.allow_sidebar_items)
)
)
if self.data.remove_sidebar_items:
# disable all
frappe.db.sql("update `tabPortal Menu Item` set enabled=1")
# enable
frappe.db.sql(
"""update `tabPortal Menu Item` set enabled=0
where route in ({})""".format(
", ".join(f'"{d}"' for d in self.data.remove_sidebar_items)
)
)
| mit | eaa39f417df36c7cd98643f28eaec0b3 | 28.946154 | 86 | 0.682507 | 3.159903 | false | false | false | false |
richardkiss/pycoin | tests/btc/segwit_test.py | 1 | 28347 | import unittest
from pycoin.encoding.bytes32 import to_bytes_32
from pycoin.encoding.hash import double_sha256
from pycoin.encoding.hexbytes import b2h, b2h_rev, h2b
from pycoin.symbols.btc import network
# BRAIN DAMAGE
Tx = network.tx
TxOut = network.tx.TxOut
SIGHASH_ALL = network.validator.flags.SIGHASH_ALL
SIGHASH_SINGLE = network.validator.flags.SIGHASH_SINGLE
SIGHASH_NONE = network.validator.flags.SIGHASH_NONE
SIGHASH_ANYONECANPAY = network.validator.flags.SIGHASH_ANYONECANPAY
class SegwitTest(unittest.TestCase):
def check_unsigned(self, tx):
for idx, txs_in in enumerate(tx.txs_in):
self.assertFalse(tx.is_solution_ok(idx))
def check_signed(self, tx):
for idx, txs_in in enumerate(tx.txs_in):
self.assertTrue(tx.is_solution_ok(idx))
def unsigned_copy(self, tx):
tx = Tx.from_hex(tx.as_hex())
for tx_in in tx.txs_in:
tx_in.script = b''
tx_in.witness = []
return tx
def check_tx_can_be_signed(self, tx_u, tx_s, private_keys=[], p2sh_values=[]):
tx_u_prime = self.unsigned_copy(tx_s)
tx_s_hex = tx_s.as_hex()
tx_u_prime.set_unspents(tx_s.unspents)
p2sh_lookup = network.tx.solve.build_p2sh_lookup([h2b(x) for x in p2sh_values])
hash160_lookup = network.tx.solve.build_hash160_lookup(private_keys)
tx_u_prime.sign(hash160_lookup=hash160_lookup, p2sh_lookup=p2sh_lookup)
self.check_signed(tx_u_prime)
tx_hex = tx_u_prime.as_hex()
self.assertEqual(tx_hex, tx_s_hex)
def test_segwit_ui(self):
# p2wpkh
address = 'bc1qqyykvamqq62n64t8gw09uw0cdgxjwwlw7mypam'
s = network.contract.for_address(address)
afs_address = network.address.for_script(s)
self.assertEqual(address, afs_address)
def test_segwit_create_tx(self):
key1 = network.keys.private(1)
coin_value = 5000000
script = network.contract.for_p2pkh_wit(key1.hash160())
tx_hash = b'\xee' * 32
tx_out_index = 0
spendable = Tx.Spendable(coin_value, script, tx_hash, tx_out_index)
key2 = network.keys.private(2)
tx = network.tx_utils.create_tx([spendable], [(key2.address(), coin_value)])
self.check_unsigned(tx)
network.tx_utils.sign_tx(tx, [key1.wif()])
self.check_signed(tx)
self.assertEqual(len(tx.txs_in[0].witness), 2)
s1 = network.contract.for_p2pkh(key1.hash160())
address = network.address.for_p2s_wit(s1)
spendable.script = network.contract.for_address(address)
tx = network.tx_utils.create_tx([spendable], [(key2.address(), coin_value)])
self.check_unsigned(tx)
network.tx_utils.sign_tx(tx, [key1.wif()], p2sh_lookup=network.tx.solve.build_p2sh_lookup([s1]))
self.check_signed(tx)
def test_issue_224(self):
RAWTX = (
"010000000002145fea0b000000001976a9144838d8b3588c4c7ba7c1d06f866e9b3739"
"c6303788ac0000000000000000346a32544553540000000a0000000000000001000000"
"0005f5e1000000000000000000000000000bebc2000032000000000000271000000000"
)
Tx.from_hex(RAWTX)
def check_bip143_tx(
self, tx_u_hex, tx_s_hex, txs_out_value_scripthex_pair, tx_in_count, tx_out_count, version, lock_time):
tx_u = Tx.from_hex(tx_u_hex)
tx_s = Tx.from_hex(tx_s_hex)
txs_out = [
TxOut(int(coin_value * 1e8), h2b(script_hex)) for coin_value, script_hex in txs_out_value_scripthex_pair
]
for tx in (tx_u, tx_s):
self.assertEqual(len(tx.txs_in), tx_in_count)
self.assertEqual(len(tx.txs_out), tx_out_count)
self.assertEqual(tx.version, version)
self.assertEqual(tx.lock_time, lock_time)
tx.set_unspents(txs_out)
self.check_unsigned(tx_u)
self.check_signed(tx_s)
tx_hex = tx_u.as_hex()
self.assertEqual(tx_hex, tx_u_hex)
tx_hex = tx_s.as_hex()
self.assertEqual(tx_hex, tx_s_hex)
tx_u_prime = self.unsigned_copy(tx_s)
tx_hex = tx_u_prime.as_hex()
self.assertEqual(tx_hex, tx_u_hex)
self.assertEqual(b2h_rev(double_sha256(h2b(tx_s_hex))), tx_s.w_id())
self.assertEqual(b2h_rev(double_sha256(h2b(tx_u_hex))), tx_u.w_id())
self.assertEqual(b2h_rev(double_sha256(h2b(tx_u_hex))), tx_u.id())
return tx_u, tx_s
# these examples are from BIP 143 at
# https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
def test_bip143_tx_1(self):
tx_u1, tx_s1 = self.check_bip143_tx(
"0100000002fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad"
"969f0000000000eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9"
"b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df3"
"78db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e"
"4dbe6a21b2d50ce2f0167faa815988ac11000000",
"01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4"
"e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3beb"
"f337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede9"
"44ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b"
"309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a914"
"8280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143b"
"de42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7"
"d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c45183315"
"61406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368d"
"a1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000",
[
(6.25, "2103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432ac"),
(6, "00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")
],
2,
2,
1,
17
)
sc = tx_s1.SolutionChecker(tx_s1)
self.assertEqual(b2h(sc._hash_prevouts(SIGHASH_ALL)),
"96b827c8483d4e9b96712b6713a7b68d6e8003a781feba36c31143470b4efd37")
self.assertEqual(b2h(sc._hash_sequence(SIGHASH_ALL)),
"52b0a642eea2fb7ae638c36f6252b6750293dbe574a806984b8e4d8548339a3b")
self.assertEqual(b2h(sc._hash_outputs(SIGHASH_ALL, 0)),
"863ef3e1a92afbfdb97f31ad0fc7683ee943e9abcf2501590ff8f6551f47e5e5")
script = network.contract.for_p2pkh(tx_s1.unspents[1].script[2:])
self.assertEqual(
b2h(sc._segwit_signature_preimage(script=script, tx_in_idx=1, hash_type=SIGHASH_ALL)),
"0100000096b827c8483d4e9b96712b6713a7b68d6e8003a781feba36c31143470b4efd"
"3752b0a642eea2fb7ae638c36f6252b6750293dbe574a806984b8e4d8548339a3bef51"
"e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000019"
"76a9141d0f172a0ecb48aee1be1f2687d2963ae33f71a188ac0046c32300000000ffff"
"ffff863ef3e1a92afbfdb97f31ad0fc7683ee943e9abcf2501590ff8f6551f47e5e511"
"00000001000000")
self.assertEqual(b2h(to_bytes_32(sc._signature_for_hash_type_segwit(script, 1, 1))),
"c37af31116d1b27caf68aae9e3ac82f1477929014d5b917657d0eb49478cb670")
self.check_tx_can_be_signed(tx_u1, tx_s1, [
0xbbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866,
0x619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9
])
def test_bip143_tx_2(self):
tx_u2, tx_s2 = self.check_bip143_tx(
"0100000001db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a"
"54770100000000feffffff02b8b4eb0b000000001976a914a457b684d7f0d539a46a45"
"bbc043f35b59d0d96388ac0008af2f000000001976a914fd270b1ee6abcaea97fea7ad"
"0402e8bd8ad6d77c88ac92040000",
"01000000000101db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3c"
"eb1a5477010000001716001479091972186c449eb1ded22b78e40d009bdf0089feffff"
"ff02b8b4eb0b000000001976a914a457b684d7f0d539a46a45bbc043f35b59d0d96388"
"ac0008af2f000000001976a914fd270b1ee6abcaea97fea7ad0402e8bd8ad6d77c88ac"
"02473044022047ac8e878352d3ebbde1c94ce3a10d057c24175747116f8288e5d794d1"
"2d482f0220217f36a485cae903c713331d877c1f64677e3622ad4010726870540656fe"
"9dcb012103ad1d8e89212f0b92c74d23bb710c00662ad1470198ac48c43f7d6f93a2a2"
"687392040000",
[(10, "a9144733f37cf4db86fbc2efed2500b4f4e49f31202387")],
1,
2,
1,
1170
)
self.check_tx_can_be_signed(
tx_u2, tx_s2, [0xeb696a065ef48a2192da5b28b694f87544b30fae8327c4510137a922f32c6dcf],
["001479091972186c449eb1ded22b78e40d009bdf0089"])
def test_bip143_tx_3(self):
tx_u3, tx_s3 = self.check_bip143_tx(
"0100000002fe3dc9208094f3ffd12645477b3dc56f60ec4fa8e6f5d67c565d1c6b9216"
"b36e0000000000ffffffff0815cf020f013ed6cf91d29f4202e8a58726b1ac6c79da47"
"c23d1bee0a6925f80000000000ffffffff0100f2052a010000001976a914a30741f814"
"5e5acadf23f751864167f32e0963f788ac00000000",
"01000000000102fe3dc9208094f3ffd12645477b3dc56f60ec4fa8e6f5d67c565d1c6b"
"9216b36e000000004847304402200af4e47c9b9629dbecc21f73af989bdaa911f7e6f6"
"c2e9394588a3aa68f81e9902204f3fcf6ade7e5abb1295b6774c8e0abd94ae62217367"
"096bc02ee5e435b67da201ffffffff0815cf020f013ed6cf91d29f4202e8a58726b1ac"
"6c79da47c23d1bee0a6925f80000000000ffffffff0100f2052a010000001976a914a3"
"0741f8145e5acadf23f751864167f32e0963f788ac000347304402200de66acf452778"
"9bfda55fc5459e214fa6083f936b430a762c629656216805ac0220396f550692cd3471"
"71cbc1ef1f51e15282e837bb2b30860dc77c8f78bc8501e503473044022027dc95ad6b"
"740fe5129e7e62a75dd00f291a2aeb1200b84b09d9e3789406b6c002201a9ecd315dd6"
"a0e632ab20bbb98948bc0c6fb204f2c286963bb48517a7058e27034721026dccc749ad"
"c2a9d0d89497ac511f760f45c47dc5ed9cf352a58ac706453880aeadab210255a9626a"
"ebf5e29c0e6538428ba0d1dcf6ca98ffdf086aa8ced5e0d0215ea465ac00000000",
[
(1.5625, "21036d5c20fa14fb2f635474c1dc4ef5909d4568e5569b79fc94d3448486e14685f8ac"),
(49, "00205d1b56b63d714eebe542309525f484b7e9d6f686b3781b6f61ef925d66d6f6a0")
],
2,
1,
1,
0
)
def test_bip143_tx_4(self):
tx_u4, tx_s4 = self.check_bip143_tx(
"0100000002e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcf"
"c0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35"
"ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626"
"ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531"
"e1ec47f35916de8e259237294d1e88ac00000000",
"01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba"
"7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e7"
"7c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b23"
"1626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd"
"4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194"
"b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325"
"be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617"
"b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac0247304402200325"
"21802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2"
"188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68"
"210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac"
"00000000",
[
(0.16777215, "0020ba468eea561b26301e4cf69fa34bde4ad60c81e70f059f045ca9a79931004a4d"),
(0.16777215, "0020d9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537"),
],
2,
2,
1,
0
)
def test_bip143_tx_5(self):
tx_u5, tx_s5 = self.check_bip143_tx(
"010000000136641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787"
"b96e0100000000ffffffff0200e9a435000000001976a914389ffce9cd9ae88dcc0631"
"e88a821ffdbe9bfe2688acc0832f05000000001976a9147480a33f950689af511e6e84"
"c138dbbd3c3ee41588ac00000000",
"0100000000010136641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca2"
"9787b96e0100000023220020a16b5755f7f6f96dbd65f5f0d6ab9418b89af4b1f14a1b"
"b8a09062c35f0dcb54ffffffff0200e9a435000000001976a914389ffce9cd9ae88dcc"
"0631e88a821ffdbe9bfe2688acc0832f05000000001976a9147480a33f950689af511e"
"6e84c138dbbd3c3ee41588ac080047304402206ac44d672dac41f9b00e28f4df20c52e"
"eb087207e8d758d76d92c6fab3b73e2b0220367750dbbe19290069cba53d096f44530e"
"4f98acaa594810388cf7409a1870ce01473044022068c7946a43232757cbdf9176f009"
"a928e1cd9a1a8c212f15c1e11ac9f2925d9002205b75f937ff2f9f3c1246e547e54f62"
"e027f64eefa2695578cc6432cdabce271502473044022059ebf56d98010a932cf8ecfe"
"c54c48e6139ed6adb0728c09cbe1e4fa0915302e022007cd986c8fa870ff5d2b3a8913"
"9c9fe7e499259875357e20fcbb15571c76795403483045022100fbefd94bd0a488d50b"
"79102b5dad4ab6ced30c4069f1eaa69a4b5a763414067e02203156c6a5c9cf88f91265"
"f5a942e96213afae16d83321c8b31bb342142a14d16381483045022100a5263ea0553b"
"a89221984bd7f0b13613db16e7a70c549a86de0cc0444141a407022005c360ef0ae5a5"
"d4f9f2f87a56c1546cc8268cab08c73501d6b3be2e1e1a8a08824730440220525406a1"
"482936d5a21888260dc165497a90a15669636d8edca6b9fe490d309c022032af0c646a"
"34a44d1f4576bf6a4a74b67940f8faa84c7df9abe12a01a11e2b4783cf56210307b8ae"
"49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba32103b28f0c28"
"bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21034b8113d703"
"413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a21033400f6afecb8"
"33092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6d48b1131e94b"
"a04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b661b0b3302ee2"
"f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56ae00000000",
[(9.87654321, "a9149993a429037b5d912407a71c252019287b8d27a587")],
1,
2,
1,
0
)
tx_u5prime = self.unsigned_copy(tx_s5)
tx_s_hex = tx_s5.as_hex()
tx_u5prime.set_unspents(tx_s5.unspents)
ss = ["56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56ae",
"0020a16b5755f7f6f96dbd65f5f0d6ab9418b89af4b1f14a1bb8a09062c35f0dcb54"]
p2sh_lookup = network.tx.solve.build_p2sh_lookup([h2b(x) for x in ss])
for se, sighash_type in [
(0x730fff80e1413068a05b57d6a58261f07551163369787f349438ea38ca80fac6, SIGHASH_ALL),
(0x11fa3d25a17cbc22b29c44a484ba552b5a53149d106d3d853e22fdd05a2d8bb3, SIGHASH_NONE),
(0x77bf4141a87d55bdd7f3cd0bdccf6e9e642935fec45f2f30047be7b799120661, SIGHASH_SINGLE),
(0x14af36970f5025ea3e8b5542c0f8ebe7763e674838d08808896b63c3351ffe49, SIGHASH_ANYONECANPAY | SIGHASH_ALL),
(0xfe9a95c19eef81dde2b95c1284ef39be497d128e2aa46916fb02d552485e0323, SIGHASH_ANYONECANPAY | SIGHASH_NONE),
(0x428a7aee9f0c2af0cd19af3cf1c78149951ea528726989b2e83e4778d2c3f890, SIGHASH_ANYONECANPAY | SIGHASH_SINGLE),
]:
tx_u5prime.sign(hash_type=sighash_type, hash160_lookup=network.tx.solve.build_hash160_lookup(
[se]), p2sh_lookup=p2sh_lookup)
self.check_signed(tx_u5prime)
tx_hex = tx_u5prime.as_hex()
self.assertEqual(tx_hex, tx_s_hex)
sc = tx_s5.SolutionChecker(tx_s5)
self.assertEqual(b2h(sc._hash_prevouts(SIGHASH_ALL)),
"74afdc312af5183c4198a40ca3c1a275b485496dd3929bca388c4b5e31f7aaa0")
self.assertEqual(b2h(sc._hash_sequence(SIGHASH_ALL)),
"3bb13029ce7b1f559ef5e747fcac439f1455a2ec7c5f09b72290795e70665044")
self.assertEqual(b2h(sc._hash_outputs(SIGHASH_ALL, 0)),
"bc4d309071414bed932f98832b27b4d76dad7e6c1346f487a8fdbb8eb90307cc")
self.assertEqual(b2h(sc._hash_outputs(SIGHASH_SINGLE, 0)),
"9efe0c13a6b16c14a41b04ebe6a63f419bdacb2f8705b494a43063ca3cd4f708")
script = tx_s5.txs_in[0].witness[-1]
self.assertEqual(
b2h(sc._segwit_signature_preimage(script=script, tx_in_idx=0, hash_type=SIGHASH_ALL)),
"0100000074afdc312af5183c4198a40ca3c1a275b485496dd3929bca388c4b5e31f7aa"
"a03bb13029ce7b1f559ef5e747fcac439f1455a2ec7c5f09b72290795e706650443664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffffbc4d309071414bed932f98832b27b4d76dad7e6c1346f487a8fd"
"bb8eb90307cc0000000001000000")
self.assertEqual(
b2h(sc._segwit_signature_preimage(script=script, tx_in_idx=0, hash_type=SIGHASH_NONE)),
"0100000074afdc312af5183c4198a40ca3c1a275b485496dd3929bca388c4b5e31f7aa"
"a000000000000000000000000000000000000000000000000000000000000000003664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffff0000000000000000000000000000000000000000000000000000"
"0000000000000000000002000000")
self.assertEqual(
b2h(sc._segwit_signature_preimage(script=script, tx_in_idx=0, hash_type=SIGHASH_SINGLE)),
"0100000074afdc312af5183c4198a40ca3c1a275b485496dd3929bca388c4b5e31f7aa"
"a000000000000000000000000000000000000000000000000000000000000000003664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffff9efe0c13a6b16c14a41b04ebe6a63f419bdacb2f8705b494a430"
"63ca3cd4f7080000000003000000")
self.assertEqual(
b2h(sc._segwit_signature_preimage(
script=script, tx_in_idx=0, hash_type=SIGHASH_ALL | SIGHASH_ANYONECANPAY)),
"0100000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000000003664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffffbc4d309071414bed932f98832b27b4d76dad7e6c1346f487a8fd"
"bb8eb90307cc0000000081000000")
self.assertEqual(
b2h(sc._segwit_signature_preimage(
script=script, tx_in_idx=0, hash_type=SIGHASH_NONE | SIGHASH_ANYONECANPAY)),
"0100000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000000003664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffff0000000000000000000000000000000000000000000000000000"
"0000000000000000000082000000")
self.assertEqual(
b2h(sc._segwit_signature_preimage(
script=script, tx_in_idx=0, hash_type=SIGHASH_SINGLE | SIGHASH_ANYONECANPAY)),
"0100000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000000003664"
"1869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e01000000cf"
"56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3"
"2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21"
"034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a2103"
"3400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6"
"d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b6"
"61b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56aeb168de"
"3a00000000ffffffff9efe0c13a6b16c14a41b04ebe6a63f419bdacb2f8705b494a430"
"63ca3cd4f7080000000083000000")
tx = Tx.from_hex("010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac83"
"87f14c1d000000ffffffff0101000000000000000000000000")
tx.set_witness(0, [h2b(x) for x in [
"30450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dc"
"c9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c5"
"3e01",
"02a9781d66b61fb5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c",
"ad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d8915"
"56dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae6"
"26c53e01"
]])
tx = Tx.from_hex(
"0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1"
"ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c497"
"4de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f8"
"45d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61f"
"b5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487f"
"b382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf9"
"5feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000")
tx_hex = tx.as_hex()
print(tx)
print(tx_hex)
tx = Tx.from_hex(
"010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac83"
"87f14c1d000000ffffffff0101000000000000000000000000")
self.assertEqual(
tx_hex,
"0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1"
"ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c497"
"4de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f8"
"45d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61f"
"b5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487f"
"b382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf9"
"5feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000")
def test_bip143_tx_6(self):
tx_u6, tx_s6 = self.check_bip143_tx(
"010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac83"
"87f14c1d000000ffffffff0101000000000000000000000000",
"0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1"
"ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c497"
"4de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f8"
"45d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61f"
"b5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487f"
"b382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf9"
"5feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000",
[(0.002, "00209e1be07558ea5cc8e02ed1d80c0911048afad949affa36d5c3951e3159dbea19")],
1,
1,
1,
0
)
def test_bip143_tx_7(self):
tx_u7, tx_s7 = self.check_bip143_tx(
"01000000019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a66"
"28964c1d000000ffffffff0101000000000000000000000000",
"010000000001019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d4"
"7a6628964c1d000000ffffffff0101000000000000000007004830450220487fb382c4"
"974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48"
"f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286"
"f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f"
"17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960101022102"
"966f109c54e85d3aee8321301136cedeb9fc710fdef58a9de8a73942f8e567c021034f"
"fc99dd9a79dd3cb31e2ab3e0b09e0e67db41ac068c625cd1f491576016c84e9552af48"
"30450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dc"
"c9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c5"
"3e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1"
"db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f59409134"
"0c039596017500000000",
[(0.002, "00209b66c15b4e0b4eb49fa877982cafded24859fe5b0e2dbfbe4f0df1de7743fd52")],
1,
1,
1,
0
)
print(tx_s7.txs_in[0])
| mit | 337147f9c74cee95d3747dbba21317d3 | 57.689441 | 120 | 0.738738 | 2.374319 | false | false | false | false |
frappe/frappe | frappe/www/contact.py | 3 | 1784 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe import _
from frappe.utils import now
sitemap = 1
def get_context(context):
doc = frappe.get_doc("Contact Us Settings", "Contact Us Settings")
if doc.query_options:
query_options = [opt.strip() for opt in doc.query_options.replace(",", "\n").split("\n") if opt]
else:
query_options = ["Sales", "Support", "General"]
out = {"query_options": query_options, "parents": [{"name": _("Home"), "route": "/"}]}
out.update(doc.as_dict())
return out
max_communications_per_hour = 1000
@frappe.whitelist(allow_guest=True)
def send_message(subject="Website Query", message="", sender=""):
    """Receive a message posted from the website contact form.

    Guest-accessible endpoint: validates the input, applies a global
    hourly rate limit, optionally forwards the message by email, and
    records it as a Communication document.

    Args:
        subject: Subject line for the stored/forwarded message.
        message: Message body; required.
        sender: Visitor's email address; required.

    Returns:
        "okay" on success; otherwise sets ``frappe.response["message"]``
        with an explanation and returns None.
    """
    if not message:
        frappe.response["message"] = "Please write something"
        return

    if not sender:
        frappe.response["message"] = "Email Address Required"
        return

    # guest method, cap max writes per hour
    # NOTE: the limit counts ALL received Communications in the last hour,
    # not just those from this sender.
    if (
        frappe.db.sql(
            """select count(*) from `tabCommunication`
        where `sent_or_received`="Received"
        and TIMEDIFF(%s, modified) < '01:00:00'""",
            now(),
        )[0][0]
        > max_communications_per_hour
    ):
        frappe.response[
            "message"
        ] = "Sorry: we believe we have received an unreasonably high number of requests of this kind. Please try later"
        return

    # send email (only when a forwarding address is configured)
    forward_to_email = frappe.db.get_single_value("Contact Us Settings", "forward_to_email")
    if forward_to_email:
        frappe.sendmail(recipients=forward_to_email, sender=sender, content=message, subject=subject)

    # add to to-do ?
    # Always persist the message as an open, received Communication.
    frappe.get_doc(
        dict(
            doctype="Communication",
            sender=sender,
            subject=_("New Message from Website Contact Page"),
            sent_or_received="Received",
            content=message,
            status="Open",
        )
    ).insert(ignore_permissions=True)

    return "okay"
| mit | 011457ffa98b66b3297c7672efab8647 | 24.485714 | 113 | 0.688341 | 3.180036 | false | false | false | false |
sqlalchemy/mako | mako/compat.py | 10 | 1913 | # mako/compat.py
# Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import collections
from importlib import util
import inspect
import sys
# Platform / interpreter feature flags used throughout Mako.
win32 = sys.platform.startswith("win")  # running on Windows
pypy = hasattr(sys, "pypy_version_info")  # running under PyPy
py38 = sys.version_info >= (3, 8)  # stdlib importlib.metadata is available
# Result type mirroring the removed ``inspect.getargspec`` return value.
ArgSpec = collections.namedtuple(
    "ArgSpec", ["args", "varargs", "keywords", "defaults"]
)


def inspect_getargspec(func):
    """getargspec based on fully vendored getfullargspec from Python 3.3."""
    if inspect.ismethod(func):
        # Unwrap bound/unbound methods down to the underlying function.
        func = func.__func__
    if not inspect.isfunction(func):
        raise TypeError(f"{func!r} is not a Python function")

    co = func.__code__
    if not inspect.iscode(co):
        raise TypeError(f"{co!r} is not a code object")

    # Positional parameters come first in co_varnames.
    positional = list(co.co_varnames[: co.co_argcount])

    # *args / **kwargs slots follow the positional and keyword-only names.
    cursor = co.co_argcount + co.co_kwonlyargcount
    star_args = None
    if co.co_flags & inspect.CO_VARARGS:
        star_args = co.co_varnames[cursor]
        cursor += 1
    star_kwargs = None
    if co.co_flags & inspect.CO_VARKEYWORDS:
        star_kwargs = co.co_varnames[cursor]

    return ArgSpec(positional, star_args, star_kwargs, func.__defaults__)
def load_module(module_id, path):
    """Import the Python source file at *path* as a module named *module_id*.

    Returns the fully-executed module object (it is not registered in
    ``sys.modules``).
    """
    module_spec = util.spec_from_file_location(module_id, path)
    loaded = util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def exception_as():
    """Return the exception instance currently being handled (or None)."""
    _, active_exc, _ = sys.exc_info()
    return active_exc
def exception_name(exc):
    """Return the class name of the given exception instance."""
    return type(exc).__name__
if py38:
from importlib import metadata as importlib_metadata
else:
import importlib_metadata # noqa
def importlib_metadata_get(group):
    """Return the entry points registered under *group*.

    Bridges the importlib-metadata API change: newer versions expose
    ``EntryPoints.select``; older versions return a plain dict keyed by
    group name.
    """
    entry_points = importlib_metadata.entry_points()
    if hasattr(entry_points, "select"):
        return entry_points.select(group=group)
    return entry_points.get(group, ())
| mit | db5326768883a91976d4bdbc5884a3ef | 24.171053 | 76 | 0.667538 | 3.516544 | false | false | false | false |
frappe/frappe | frappe/model/utils/link_count.py | 3 | 1522 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
ignore_doctypes = ("DocType", "Print Format", "Role", "Module Def", "Communication", "ToDo")
def notify_link_count(doctype, name):
    """Record one more link to the given document in request-local state.

    No-op unless ``frappe.local.link_count`` has been initialized for this
    request; totals are flushed later by ``flush_local_link_count``.
    """
    if not hasattr(frappe.local, "link_count"):
        return
    counters = frappe.local.link_count
    key = (doctype, name)
    if key in counters:
        counters[key] += 1
    else:
        counters[key] = 1
def flush_local_link_count():
    """Merge request-local link counters into the shared "_link_count" cache.

    Called before the request ends; ``update_link_count`` later persists the
    cached totals to the database. No-op when nothing was counted in this
    request.
    """
    local_counts = getattr(frappe.local, "link_count", None)
    if not local_counts:
        return

    link_count = frappe.cache().get_value("_link_count") or {}

    for key, value in local_counts.items():
        # Fix: use the value already yielded by items() -- the previous code
        # bound `value` but re-indexed frappe.local.link_count[key] instead,
        # leaving the loop variable unused.
        link_count[key] = link_count.get(key, 0) + value

    frappe.cache().set_value("_link_count", link_count)
def update_link_count():
    """increment link count in the `idx` column for the given document"""
    # Totals accumulated by flush_local_link_count() across requests.
    link_count = frappe.cache().get_value("_link_count")
    if link_count:
        for key, count in link_count.items():
            # key is (doctype, docname); skip doctypes listed in ignore_doctypes.
            if key[0] not in ignore_doctypes:
                try:
                    # NOTE(review): the table name and count are interpolated
                    # into the SQL string; count comes from the internal cache
                    # (ints) and key[0] from doctype names, so presumably
                    # trusted -- confirm no user-controlled values reach here.
                    frappe.db.sql(
                        f"update `tab{key[0]}` set idx = idx + {count} where name=%s",
                        key[1],
                        auto_commit=1,
                    )
                except Exception as e:
                    # Single doctypes have no table; only such "missing table"
                    # errors are tolerated, everything else re-raises.
                    if not frappe.db.is_table_missing(e):  # table not found, single
                        raise e
    # reset the count
    frappe.cache().delete_value("_link_count")
| mit | 68f640b259afce789a87a9d062a41422 | 27.716981 | 92 | 0.665572 | 3.068548 | false | false | false | false |
klen/pylama | pylama/lint/pylama_pyflakes.py | 1 | 1804 | """Pyflakes support."""
from pyflakes import checker
from pylama.context import RunContext
from pylama.lint import LinterV2 as Abstract
# Map pyflakes message templates to pylint-style warning/error codes that
# pylama reports.
m = checker.messages

CODES = {
    m.UnusedImport.message: "W0611",
    m.RedefinedWhileUnused.message: "W0404",
    m.ImportShadowedByLoopVar.message: "W0621",
    m.ImportStarUsed.message: "W0401",
    m.ImportStarUsage.message: "W0401",
    m.UndefinedName.message: "E0602",
    m.DoctestSyntaxError.message: "W0511",
    m.UndefinedExport.message: "E0603",
    m.UndefinedLocal.message: "E0602",
    m.DuplicateArgument.message: "E1122",
    m.LateFutureImport.message: "W0410",
    m.UnusedVariable.message: "W0612",
    m.ReturnOutsideFunction.message: "E0104",
}

# RedefinedInListComp and ReturnWithArgsInsideGenerator were removed at pyflakes 2.5.0:
# https://github.com/PyCQA/pyflakes/commit/2246217295dc8cb30ef4a7b9d8dc449ce32e603a
if hasattr(m, "RedefinedInListComp"):
    CODES[m.RedefinedInListComp.message] = "W0621"

if hasattr(m, "ReturnWithArgsInsideGenerator"):
    CODES[m.ReturnWithArgsInsideGenerator.message] = "E0106"
class Linter(Abstract):
    """Pyflakes runner."""

    name = "pyflakes"

    def run_check(self, context: RunContext):  # noqa
        """Check code with pyflakes."""
        options = context.get_params("pyflakes")
        builtin_names = options.get("builtins", "")
        if builtin_names:
            builtin_names = builtin_names.split(",")
        result = checker.Checker(context.ast, context.filename, builtins=builtin_names)
        for warning in result.messages:
            context.push(
                lnum=warning.lineno,
                col=warning.col + 1,
                text=warning.message % warning.message_args,
                number=CODES.get(warning.message, ""),
                source="pyflakes",
            )
# pylama:ignore=E501,C0301
| mit | e77ef39cfa653b93b2b2a03566f471ed | 31.214286 | 87 | 0.666851 | 3.35316 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/ohlc/increasing/_line.py | 1 | 6987 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    # NOTE(review): this class follows Plotly's code-generation template and is
    # presumably auto-generated -- regenerate rather than hand-edit; confirm
    # against the plotly.py codegen before modifying.

    # class properties
    # --------------------
    _parent_path_str = "ohlc.increasing"
    _path_str = "ohlc.increasing.line"
    _valid_props = {"color", "dash", "width"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the line color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # dash
    # ----
    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").

        The 'dash' property is an enumeration that may be specified as:
          - One of the following dash styles:
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
          - A string containing a dash length list in pixels or percentages
                (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)

        Returns
        -------
        str
        """
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the line width (in px).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """

    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.ohlc.increasing.Line`
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.increasing.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.increasing.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("dash", None)
        _v = dash if dash is not None else _v
        if _v is not None:
            self["dash"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit | bae4476f80143421ccc138bcdf6658ec | 32.917476 | 82 | 0.527122 | 3.864491 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/carpet/_font.py | 1 | 8383 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    # NOTE(review): this class follows Plotly's code-generation template and is
    # presumably auto-generated -- regenerate rather than hand-edit; confirm
    # against the plotly.py codegen before modifying.

    # class properties
    # --------------------
    _parent_path_str = "carpet"
    _path_str = "carpet.font"
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object

        The default font used for axis & tick labels on this carpet

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.carpet.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.carpet.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit | 1ddf46ad21c9064e9d4b39d760e09069 | 36.09292 | 82 | 0.557557 | 3.999523 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py | 1 | 19576 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Pattern(_BaseTraceHierarchyType):
    # NOTE(review): this class follows Plotly's code-generation template and is
    # presumably auto-generated -- regenerate rather than hand-edit; confirm
    # against the plotly.py codegen before modifying.

    # class properties
    # --------------------
    _parent_path_str = "bar.marker"
    _path_str = "bar.marker.pattern"
    _valid_props = {
        "bgcolor",
        "bgcolorsrc",
        "fgcolor",
        "fgcolorsrc",
        "fgopacity",
        "fillmode",
        "shape",
        "shapesrc",
        "size",
        "sizesrc",
        "solidity",
        "soliditysrc",
    }

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        When there is no colorscale sets the color of background
        pattern fill. Defaults to a `marker.color` background when
        `fillmode` is "overlay". Otherwise, defaults to a transparent
        background.

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `bgcolor`.

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # fgcolor
    # -------
    @property
    def fgcolor(self):
        """
        When there is no colorscale sets the color of foreground
        pattern fill. Defaults to a `marker.color` background when
        `fillmode` is "replace". Otherwise, defaults to dark grey or
        white to increase contrast with the `bgcolor`.

        The 'fgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["fgcolor"]

    @fgcolor.setter
    def fgcolor(self, val):
        self["fgcolor"] = val

    # fgcolorsrc
    # ----------
    @property
    def fgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `fgcolor`.

        The 'fgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["fgcolorsrc"]

    @fgcolorsrc.setter
    def fgcolorsrc(self, val):
        self["fgcolorsrc"] = val

    # fgopacity
    # ---------
    @property
    def fgopacity(self):
        """
        Sets the opacity of the foreground pattern fill. Defaults to a
        0.5 when `fillmode` is "overlay". Otherwise, defaults to 1.

        The 'fgopacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["fgopacity"]

    @fgopacity.setter
    def fgopacity(self, val):
        self["fgopacity"] = val

    # fillmode
    # --------
    @property
    def fillmode(self):
        """
        Determines whether `marker.color` should be used as a default
        to `bgcolor` or a `fgcolor`.

        The 'fillmode' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['replace', 'overlay']

        Returns
        -------
        Any
        """
        return self["fillmode"]

    @fillmode.setter
    def fillmode(self, val):
        self["fillmode"] = val

    # shape
    # -----
    @property
    def shape(self):
        """
        Sets the shape of the pattern fill. By default, no pattern is
        used for filling the area.

        The 'shape' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['', '/', '\\', 'x', '-', '|', '+', '.']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["shape"]

    @shape.setter
    def shape(self, val):
        self["shape"] = val

    # shapesrc
    # --------
    @property
    def shapesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `shape`.

        The 'shapesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["shapesrc"]

    @shapesrc.setter
    def shapesrc(self, val):
        self["shapesrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        Sets the size of unit squares of the pattern fill in pixels,
        which corresponds to the interval of repetition of the pattern.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # solidity
    # --------
    @property
    def solidity(self):
        """
        Sets the solidity of the pattern fill. Solidity is roughly the
        fraction of the area filled by the pattern. Solidity of 0 shows
        only the background color without pattern and solidty of 1
        shows only the foreground color without pattern.

        The 'solidity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["solidity"]

    @solidity.setter
    def solidity(self, val):
        self["solidity"] = val

    # soliditysrc
    # -----------
    @property
    def soliditysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `solidity`.

        The 'soliditysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["soliditysrc"]

    @soliditysrc.setter
    def soliditysrc(self, val):
        self["soliditysrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        bgcolor
            When there is no colorscale sets the color of
            background pattern fill. Defaults to a `marker.color`
            background when `fillmode` is "overlay". Otherwise,
            defaults to a transparent background.
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        fgcolor
            When there is no colorscale sets the color of
            foreground pattern fill. Defaults to a `marker.color`
            background when `fillmode` is "replace". Otherwise,
            defaults to dark grey or white to increase contrast
            with the `bgcolor`.
        fgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `fgcolor`.
        fgopacity
            Sets the opacity of the foreground pattern fill.
            Defaults to a 0.5 when `fillmode` is "overlay".
            Otherwise, defaults to 1.
        fillmode
            Determines whether `marker.color` should be used as a
            default to `bgcolor` or a `fgcolor`.
        shape
            Sets the shape of the pattern fill. By default, no
            pattern is used for filling the area.
        shapesrc
            Sets the source reference on Chart Studio Cloud for
            `shape`.
        size
            Sets the size of unit squares of the pattern fill in
            pixels, which corresponds to the interval of repetition
            of the pattern.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        solidity
            Sets the solidity of the pattern fill. Solidity is
            roughly the fraction of the area filled by the pattern.
            Solidity of 0 shows only the background color without
            pattern and solidty of 1 shows only the foreground
            color without pattern.
        soliditysrc
            Sets the source reference on Chart Studio Cloud for
            `solidity`.
        """

    def __init__(
        self,
        arg=None,
        bgcolor=None,
        bgcolorsrc=None,
        fgcolor=None,
        fgcolorsrc=None,
        fgopacity=None,
        fillmode=None,
        shape=None,
        shapesrc=None,
        size=None,
        sizesrc=None,
        solidity=None,
        soliditysrc=None,
        **kwargs,
    ):
        """
        Construct a new Pattern object

        Sets the pattern within the marker.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.bar.marker.Pattern`
        bgcolor
            When there is no colorscale sets the color of
            background pattern fill. Defaults to a `marker.color`
            background when `fillmode` is "overlay". Otherwise,
            defaults to a transparent background.
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        fgcolor
            When there is no colorscale sets the color of
            foreground pattern fill. Defaults to a `marker.color`
            background when `fillmode` is "replace". Otherwise,
            defaults to dark grey or white to increase contrast
            with the `bgcolor`.
        fgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `fgcolor`.
        fgopacity
            Sets the opacity of the foreground pattern fill.
            Defaults to a 0.5 when `fillmode` is "overlay".
            Otherwise, defaults to 1.
        fillmode
            Determines whether `marker.color` should be used as a
            default to `bgcolor` or a `fgcolor`.
        shape
            Sets the shape of the pattern fill. By default, no
            pattern is used for filling the area.
        shapesrc
            Sets the source reference on Chart Studio Cloud for
            `shape`.
        size
            Sets the size of unit squares of the pattern fill in
            pixels, which corresponds to the interval of repetition
            of the pattern.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        solidity
            Sets the solidity of the pattern fill. Solidity is
            roughly the fraction of the area filled by the pattern.
            Solidity of 0 shows only the background color without
            pattern and solidty of 1 shows only the foreground
            color without pattern.
        soliditysrc
            Sets the source reference on Chart Studio Cloud for
            `solidity`.

        Returns
        -------
        Pattern
        """
        super(Pattern, self).__init__("pattern")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.bar.marker.Pattern
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.Pattern`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("fgcolor", None)
        _v = fgcolor if fgcolor is not None else _v
        if _v is not None:
            self["fgcolor"] = _v
        _v = arg.pop("fgcolorsrc", None)
        _v = fgcolorsrc if fgcolorsrc is not None else _v
        if _v is not None:
            self["fgcolorsrc"] = _v
        _v = arg.pop("fgopacity", None)
        _v = fgopacity if fgopacity is not None else _v
        if _v is not None:
            self["fgopacity"] = _v
        _v = arg.pop("fillmode", None)
        _v = fillmode if fillmode is not None else _v
        if _v is not None:
            self["fillmode"] = _v
        _v = arg.pop("shape", None)
        _v = shape if shape is not None else _v
        if _v is not None:
            self["shape"] = _v
        _v = arg.pop("shapesrc", None)
        _v = shapesrc if shapesrc is not None else _v
        if _v is not None:
            self["shapesrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        _v = arg.pop("solidity", None)
        _v = solidity if solidity is not None else _v
        if _v is not None:
            self["solidity"] = _v
        _v = arg.pop("soliditysrc", None)
        _v = soliditysrc if soliditysrc is not None else _v
        if _v is not None:
            self["soliditysrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit | 8541184263483bb313d498c914d57651 | 32.751724 | 82 | 0.556191 | 4.107428 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/_scattergeo.py | 1 | 86196 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Scattergeo(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "scattergeo"
_valid_props = {
"connectgaps",
"customdata",
"customdatasrc",
"featureidkey",
"fill",
"fillcolor",
"geo",
"geojson",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"lat",
"latsrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"locationmode",
"locations",
"locationssrc",
"lon",
"lonsrc",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
}
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# featureidkey
# ------------
@property
def featureidkey(self):
"""
Sets the key in GeoJSON features which is used as id to match
the items included in the `locations` array. Only has an effect
when `geojson` is set. Support nested property, for example
"properties.name".
The 'featureidkey' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["featureidkey"]
@featureidkey.setter
def featureidkey(self, val):
self["featureidkey"] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". "toself" connects the endpoints of the trace (or
each segment of the trace if it has gaps) into a closed shape.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# geo
# ---
@property
def geo(self):
"""
Sets a reference between this trace's geospatial coordinates
and a geographic map. If "geo" (the default value), the
geospatial coordinates refer to `layout.geo`. If "geo2", the
geospatial coordinates refer to `layout.geo2`, and so on.
The 'geo' property is an identifier of a particular
subplot, of type 'geo', that may be specified as the string 'geo'
optionally followed by an integer >= 1
(e.g. 'geo', 'geo1', 'geo2', 'geo3', etc.)
Returns
-------
str
"""
return self["geo"]
@geo.setter
def geo(self, val):
self["geo"] = val
# geojson
# -------
@property
def geojson(self):
"""
Sets optional GeoJSON data associated with this trace. If not
given, the features on the base map are used when `locations`
is set. It can be set as a valid GeoJSON object or as a URL
string. Note that we only accept GeoJSONs of type
"FeatureCollection" or "Feature" with geometries of type
"Polygon" or "MultiPolygon".
The 'geojson' property accepts values of any type
Returns
-------
Any
"""
return self["geojson"]
@geojson.setter
def geojson(self, val):
self["geojson"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lon', 'lat', 'location', 'text', 'name'] joined with '+' characters
(e.g. 'lon+lat')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
Returns
-------
plotly.graph_objs.scattergeo.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. The variables available in `hovertemplate`
are the ones emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (lon,lat) pair or
item in `locations`. If a single string, the same string
appears over all the data points. If an array of string, the
items are mapped in order to the this trace's (lon,lat) or
`locations` coordinates. To be seen, trace `hoverinfo` must
contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# lat
# ---
@property
def lat(self):
"""
Sets the latitude coordinates (in degrees North).
The 'lat' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
# latsrc
# ------
@property
def latsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `lat`.
The 'latsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["latsrc"]
@latsrc.setter
def latsrc(self, val):
self["latsrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.scattergeo.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
`*reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# legendwidth
# -----------
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattergeo.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# locationmode
# ------------
@property
def locationmode(self):
"""
Determines the set of locations used to match entries in
`locations` to regions on the map. Values "ISO-3", "USA-
states", *country names* correspond to features on the base map
and value "geojson-id" corresponds to features from a custom
GeoJSON linked to the `geojson` attribute.
The 'locationmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['ISO-3', 'USA-states', 'country names', 'geojson-id']
Returns
-------
Any
"""
return self["locationmode"]
@locationmode.setter
def locationmode(self, val):
self["locationmode"] = val
# locations
# ---------
@property
def locations(self):
"""
Sets the coordinates via location IDs or names. Coordinates
correspond to the centroid of each location given. See
`locationmode` for more info.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# lon
# ---
@property
def lon(self):
"""
Sets the longitude coordinates (in degrees East).
The 'lon' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
# lonsrc
# ------
@property
def lonsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `lon`.
The 'lonsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lonsrc"]
@lonsrc.setter
def lonsrc(self, val):
self["lonsrc"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
angle
Sets the marker angle in respect to `angleref`.
angleref
Sets the reference for marker angle. With
"previous", angle 0 points along the line from
the previous point to this one. With "up",
angle 0 points toward the top of the screen.
With "north", angle 0 points north based on the
current map projection.
anglesrc
Sets the source reference on Chart Studio Cloud
for `angle`.
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.scattergeo.marker.
ColorBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
gradient
:class:`plotly.graph_objects.scattergeo.marker.
Gradient` instance or dict with compatible
properties
line
:class:`plotly.graph_objects.scattergeo.marker.
Line` instance or dict with compatible
properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color` is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
standoff
Moves the marker away from the data point in
the direction of `angle` (in px). This can be
useful for example if you have another marker
at this location and you want to point an
arrowhead marker at it.
standoffsrc
Sets the source reference on Chart Studio Cloud
for `standoff`.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for `symbol`.
Returns
-------
plotly.graph_objs.scattergeo.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points and the trace is not
stacked then the default is "lines+markers". Otherwise,
"lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["mode"]
@mode.setter
def mode(self, val):
self["mode"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattergeo.selecte
d.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattergeo.selecte
d.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattergeo.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.scattergeo.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (lon,lat) pair or item
in `locations`. If a single string, the same string appears
over all the data points. If an array of string, the items are
mapped in order to the this trace's (lon,lat) or `locations`
coordinates. If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in the
hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.scattergeo.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textposition
# ------------
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respects to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`textposition`.
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appear on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Every attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
variables `lat`, `lon`, `location` and `text`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattergeo.unselec
ted.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattergeo.unselec
ted.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattergeo.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
    # type
    # ----
    @property
    def type(self):
        # Read-only trace type: set exactly once in __init__
        # (self._props["type"] = "scattergeo") and read straight from the
        # private _props dict, bypassing validation. No setter on purpose.
        return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Auto-generated, human-readable description of every Scattergeo
        # property. This is a runtime string (surfaced to users), so its
        # content must be kept exactly as generated — do not edit by hand.
        return """\
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        featureidkey
            Sets the key in GeoJSON features which is used as id to
            match the items included in the `locations` array. Only
            has an effect when `geojson` is set. Support nested
            property, for example "properties.name".
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". "toself" connects the
            endpoints of the trace (or each segment of the trace if
            it has gaps) into a closed shape.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        geo
            Sets a reference between this trace's geospatial
            coordinates and a geographic map. If "geo" (the default
            value), the geospatial coordinates refer to
            `layout.geo`. If "geo2", the geospatial coordinates
            refer to `layout.geo2`, and so on.
        geojson
            Sets optional GeoJSON data associated with this trace.
            If not given, the features on the base map are used
            when `locations` is set. It can be set as a valid
            GeoJSON object or as a URL string. Note that we only
            accept GeoJSONs of type "FeatureCollection" or
            "Feature" with geometries of type "Polygon" or
            "MultiPolygon".
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.scattergeo.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (lon,lat)
            pair or item in `locations`. If a single string, the
            same string appears over all the data points. If an
            array of string, the items are mapped in order to the
            this trace's (lon,lat) or `locations` coordinates. To
            be seen, trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        lat
            Sets the latitude coordinates (in degrees North).
        latsrc
            Sets the source reference on Chart Studio Cloud for
            `lat`.
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.scattergeo.Legendgrouptitl
            e` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with `*reversed* `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.scattergeo.Line` instance
            or dict with compatible properties
        locationmode
            Determines the set of locations used to match entries
            in `locations` to regions on the map. Values "ISO-3",
            "USA-states", *country names* correspond to features on
            the base map and value "geojson-id" corresponds to
            features from a custom GeoJSON linked to the `geojson`
            attribute.
        locations
            Sets the coordinates via location IDs or names.
            Coordinates correspond to the centroid of each location
            given. See `locationmode` for more info.
        locationssrc
            Sets the source reference on Chart Studio Cloud for
            `locations`.
        lon
            Sets the longitude coordinates (in degrees East).
        lonsrc
            Sets the source reference on Chart Studio Cloud for
            `lon`.
        marker
            :class:`plotly.graph_objects.scattergeo.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are less than
            20 points and the trace is not stacked then the default
            is "lines+markers". Otherwise, "lines".
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.scattergeo.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.scattergeo.Stream`
            instance or dict with compatible properties
        text
            Sets text elements associated with each (lon,lat) pair
            or item in `locations`. If a single string, the same
            string appears over all the data points. If an array of
            string, the items are mapped in order to the this
            trace's (lon,lat) or `locations` coordinates. If trace
            `hoverinfo` contains a "text" flag and "hovertext" is
            not set, these elements will be seen in the hover
            labels.
        textfont
            Sets the text font.
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            `textposition`.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. variables `lat`, `lon`, `location` and
            `text`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.scattergeo.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
featureidkey=None,
fill=None,
fillcolor=None,
geo=None,
geojson=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
lat=None,
latsrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
locationmode=None,
locations=None,
locationssrc=None,
lon=None,
lonsrc=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
**kwargs,
):
"""
Construct a new Scattergeo object
The data visualized as scatter point or lines on a geographic
map is provided either by longitude/latitude pairs in `lon` and
`lat` respectively or by geographic location IDs or names in
`locations`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Scattergeo`
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
featureidkey
Sets the key in GeoJSON features which is used as id to
match the items included in the `locations` array. Only
has an effect when `geojson` is set. Support nested
property, for example "properties.name".
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
geo
Sets a reference between this trace's geospatial
coordinates and a geographic map. If "geo" (the default
value), the geospatial coordinates refer to
`layout.geo`. If "geo2", the geospatial coordinates
refer to `layout.geo2`, and so on.
geojson
Sets optional GeoJSON data associated with this trace.
If not given, the features on the base map are used
when `locations` is set. It can be set as a valid
GeoJSON object or as a URL string. Note that we only
accept GeoJSONs of type "FeatureCollection" or
"Feature" with geometries of type "Polygon" or
"MultiPolygon".
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.scattergeo.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (lon,lat)
pair or item in `locations`. If a single string, the
same string appears over all the data points. If an
array of string, the items are mapped in order to the
this trace's (lon,lat) or `locations` coordinates. To
be seen, trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
`lat`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.scattergeo.Legendgrouptitl
e` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.scattergeo.Line` instance
or dict with compatible properties
locationmode
Determines the set of locations used to match entries
in `locations` to regions on the map. Values "ISO-3",
"USA-states", *country names* correspond to features on
the base map and value "geojson-id" corresponds to
features from a custom GeoJSON linked to the `geojson`
attribute.
locations
Sets the coordinates via location IDs or names.
Coordinates correspond to the centroid of each location
given. See `locationmode` for more info.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
`lon`.
marker
:class:`plotly.graph_objects.scattergeo.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattergeo.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattergeo.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (lon,lat) pair
or item in `locations`. If a single string, the same
string appears over all the data points. If an array of
string, the items are mapped in order to the this
trace's (lon,lat) or `locations` coordinates. If trace
`hoverinfo` contains a "text" flag and "hovertext" is
not set, these elements will be seen in the hover
labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respects
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `lat`, `lon`, `location` and
`text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattergeo.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Scattergeo
"""
super(Scattergeo, self).__init__("scattergeo")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattergeo
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattergeo`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("connectgaps", None)
_v = connectgaps if connectgaps is not None else _v
if _v is not None:
self["connectgaps"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("featureidkey", None)
_v = featureidkey if featureidkey is not None else _v
if _v is not None:
self["featureidkey"] = _v
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("geo", None)
_v = geo if geo is not None else _v
if _v is not None:
self["geo"] = _v
_v = arg.pop("geojson", None)
_v = geojson if geojson is not None else _v
if _v is not None:
self["geojson"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("lat", None)
_v = lat if lat is not None else _v
if _v is not None:
self["lat"] = _v
_v = arg.pop("latsrc", None)
_v = latsrc if latsrc is not None else _v
if _v is not None:
self["latsrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("legendwidth", None)
_v = legendwidth if legendwidth is not None else _v
if _v is not None:
self["legendwidth"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("locationmode", None)
_v = locationmode if locationmode is not None else _v
if _v is not None:
self["locationmode"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("lon", None)
_v = lon if lon is not None else _v
if _v is not None:
self["lon"] = _v
_v = arg.pop("lonsrc", None)
_v = lonsrc if lonsrc is not None else _v
if _v is not None:
self["lonsrc"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("mode", None)
_v = mode if mode is not None else _v
if _v is not None:
self["mode"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "scattergeo"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit | 640160c1532754010a4988bb1c957caf | 35.065272 | 100 | 0.560328 | 4.568128 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/image/hoverlabel/_font.py | 1 | 1858 | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator wiring the ``image.hoverlabel.font`` property
    to its generated ``Font`` data class."""

    def __init__(self, plotly_name="font", parent_name="image.hoverlabel", **kwargs):
        # Pull the overridable codegen defaults out of ``kwargs`` first so
        # that explicit caller overrides take precedence, then delegate the
        # remainder to the compound-validator base class.
        data_docs = kwargs.pop(
            "data_docs",
            """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            """,
        )
        data_class_str = kwargs.pop("data_class_str", "Font")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
| mit | 780216b84ba17676b99011cd0a862a1d | 39.391304 | 85 | 0.531755 | 4.764103 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattercarpet/marker/_gradient.py | 1 | 7894 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gradient(_BaseTraceHierarchyType):
    """Gradient fill settings for ``scattercarpet.marker``."""

    # Location of this node inside the figure property hierarchy.
    _parent_path_str = "scattercarpet.marker"
    _path_str = "scattercarpet.marker.gradient"
    _valid_props = {"color", "colorsrc", "type", "typesrc"}

    # color -------------------------------------------------------------
    @property
    def color(self):
        """
        Sets the final color of the gradient fill: the center color for
        radial, the right for horizontal, or the bottom for vertical.

        The 'color' property is a color and may be specified as a hex
        string, an rgb/rgba, hsl/hsla or hsv/hsva string, a named CSS
        color, or a list/array of any of the above.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc ----------------------------------------------------------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # type --------------------------------------------------------------
    @property
    def type(self):
        """
        Sets the type of gradient used to fill the markers.

        The 'type' property is an enumeration that may be specified as
        one of ['radial', 'horizontal', 'vertical', 'none'], or a
        tuple, list, or one-dimensional numpy array thereof.

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["type"]

    @type.setter
    def type(self, val):
        self["type"] = val

    # typesrc -----------------------------------------------------------
    @property
    def typesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `type`.

        The 'typesrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["typesrc"]

    @typesrc.setter
    def typesrc(self, val):
        self["typesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, type=None, typesrc=None, **kwargs
    ):
        """
        Construct a new Gradient object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattercarpet.
            marker.Gradient`
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.

        Returns
        -------
        Gradient
        """
        super().__init__("gradient")

        # Internal construction path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain, privately-owned property dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattercarpet.marker.Gradient
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.Gradient`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the known properties; explicit keyword arguments win
        # over values found in ``arg``.
        for prop, override in (
            ("color", color),
            ("colorsrc", colorsrc),
            ("type", type),
            ("typesrc", typesrc),
        ):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Hand everything unrecognized to the shared kwargs machinery.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable validation now that construction is complete.
        self._skip_invalid = False
| mit | e446ea58332c5e6ab00cbc33f92db446 | 32.449153 | 84 | 0.546618 | 4.126503 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/title/_font.py | 1 | 1570 | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator wiring the ``choroplethmapbox.colorbar.title.font``
    property to its generated ``Font`` data class."""

    def __init__(
        self,
        plotly_name="font",
        parent_name="choroplethmapbox.colorbar.title",
        **kwargs,
    ):
        # Pull the overridable codegen defaults out of ``kwargs`` first so
        # that explicit caller overrides take precedence, then delegate the
        # remainder to the compound-validator base class.
        data_docs = kwargs.pop(
            "data_docs",
            """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            size
            """,
        )
        data_class_str = kwargs.pop("data_class_str", "Font")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
| mit | df4765302216879cb2bca54180965334 | 36.380952 | 68 | 0.53121 | 4.498567 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scatterpolargl/hoverlabel/_font.py | 1 | 11226 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Hover-label font settings for ``scatterpolargl``."""

    # Location of this node inside the figure property hierarchy.
    _parent_path_str = "scatterpolargl.hoverlabel"
    _path_str = "scatterpolargl.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # color -------------------------------------------------------------
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as a hex
        string, an rgb/rgba, hsl/hsla or hsv/hsva string, a named CSS
        color, or a list/array of any of the above.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc ----------------------------------------------------------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family ------------------------------------------------------------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported.

        The 'family' property is a non-empty string, or a tuple, list,
        or one-dimensional numpy array of non-empty strings.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc ---------------------------------------------------------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # size --------------------------------------------------------------
    @property
    def size(self):
        """
        The 'size' property is a number in the interval [1, inf], or a
        tuple, list, or one-dimensional numpy array thereof.

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc -----------------------------------------------------------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs,
    ):
        """
        Construct a new Font object.

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolargl
            .hoverlabel.Font`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family; see the property docstring for the
            fonts supported by the Chart Studio Cloud image server.
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.

        Returns
        -------
        Font
        """
        super().__init__("font")

        # Internal construction path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain, privately-owned property dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolargl.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the known properties; explicit keyword arguments win
        # over values found in ``arg``.
        for prop, override in (
            ("color", color),
            ("colorsrc", colorsrc),
            ("family", family),
            ("familysrc", familysrc),
            ("size", size),
            ("sizesrc", sizesrc),
        ):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Hand everything unrecognized to the shared kwargs machinery.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable validation now that construction is complete.
        self._skip_invalid = False
| mit | 2b823d9c1f8a44d732e80a5aa02bb34f | 33.018182 | 82 | 0.553269 | 4.057102 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scatterpolargl/marker/colorbar/_tickfont.py | 1 | 8529 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    """Tick-label font settings for ``scatterpolargl.marker.colorbar``."""

    # Location of this node inside the figure property hierarchy.
    _parent_path_str = "scatterpolargl.marker.colorbar"
    _path_str = "scatterpolargl.marker.colorbar.tickfont"
    _valid_props = {"color", "family", "size"}

    # color -------------------------------------------------------------
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as a hex
        string, an rgb/rgba, hsl/hsla or hsv/hsva string, or a named
        CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family ------------------------------------------------------------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported.

        The 'family' property is a string and must be specified as a
        non-empty string.

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size --------------------------------------------------------------
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as an int
        or float in the interval [1, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object.

        Sets the color bar's tick label font

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolargl
            .marker.colorbar.Tickfont`
        color
        family
            HTML font family; see the property docstring for the
            fonts supported by the Chart Studio Cloud image server.
        size

        Returns
        -------
        Tickfont
        """
        super().__init__("tickfont")

        # Internal construction path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain, privately-owned property dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolargl.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.marker.colorbar.Tickfont`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the known properties; explicit keyword arguments win
        # over values found in ``arg``.
        for prop, override in (("color", color), ("family", family), ("size", size)):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Hand everything unrecognized to the shared kwargs machinery.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable validation now that construction is complete.
        self._skip_invalid = False
| mit | 317a63b4858f2cf528e5d5f8d870a9fc | 36.572687 | 84 | 0.56302 | 4.002346 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/splom/_diagonal.py | 1 | 2702 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Diagonal(_BaseTraceHierarchyType):
    """Diagonal-subplot settings for ``splom`` traces."""

    # Location of this node inside the figure property hierarchy.
    _parent_path_str = "splom"
    _path_str = "splom.diagonal"
    _valid_props = {"visible"}

    # visible -----------------------------------------------------------
    @property
    def visible(self):
        """
        Determines whether or not subplots on the diagonal are
        displayed.

        The 'visible' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        visible
            Determines whether or not subplots on the diagonal are
            displayed.
        """

    def __init__(self, arg=None, visible=None, **kwargs):
        """
        Construct a new Diagonal object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.splom.Diagonal`
        visible
            Determines whether or not subplots on the diagonal are
            displayed.

        Returns
        -------
        Diagonal
        """
        super().__init__("diagonal")

        # Internal construction path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain, privately-owned property dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.splom.Diagonal
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.Diagonal`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the single known property; an explicit keyword
        # argument wins over a value found in ``arg``.
        value = arg.pop("visible", None)
        if visible is not None:
            value = visible
        if value is not None:
            self["visible"] = value

        # Hand everything unrecognized to the shared kwargs machinery.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable validation now that construction is complete.
        self._skip_invalid = False
| mit | fdc5d2907de088d02f903b55a3bf4ecf | 25.490196 | 82 | 0.507032 | 4.666667 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattersmith/marker/_gradient.py | 1 | 7888 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gradient(_BaseTraceHierarchyType):
    """Gradient fill settings for ``scattersmith.marker``."""

    # Location of this node inside the figure property hierarchy.
    _parent_path_str = "scattersmith.marker"
    _path_str = "scattersmith.marker.gradient"
    _valid_props = {"color", "colorsrc", "type", "typesrc"}

    # color -------------------------------------------------------------
    @property
    def color(self):
        """
        Sets the final color of the gradient fill: the center color for
        radial, the right for horizontal, or the bottom for vertical.

        The 'color' property is a color and may be specified as a hex
        string, an rgb/rgba, hsl/hsla or hsv/hsva string, a named CSS
        color, or a list/array of any of the above.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc ----------------------------------------------------------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # type --------------------------------------------------------------
    @property
    def type(self):
        """
        Sets the type of gradient used to fill the markers.

        The 'type' property is an enumeration that may be specified as
        one of ['radial', 'horizontal', 'vertical', 'none'], or a
        tuple, list, or one-dimensional numpy array thereof.

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["type"]

    @type.setter
    def type(self, val):
        self["type"] = val

    # typesrc -----------------------------------------------------------
    @property
    def typesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `type`.

        The 'typesrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["typesrc"]

    @typesrc.setter
    def typesrc(self, val):
        self["typesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, type=None, typesrc=None, **kwargs
    ):
        """
        Construct a new Gradient object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattersmith.marker.Gradient`
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.

        Returns
        -------
        Gradient
        """
        super().__init__("gradient")

        # Internal construction path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain, privately-owned property dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattersmith.marker.Gradient
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.marker.Gradient`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the known properties; explicit keyword arguments win
        # over values found in ``arg``.
        for prop, override in (
            ("color", color),
            ("colorsrc", colorsrc),
            ("type", type),
            ("typesrc", typesrc),
        ):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Hand everything unrecognized to the shared kwargs machinery.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable validation now that construction is complete.
        self._skip_invalid = False
| mit | 3510ff79756ff6e58a8ffc32d0bc8257 | 32.423729 | 84 | 0.5464 | 4.101924 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/splom/marker/_colorbar.py | 1 | 78169 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
# NOTE: this file is produced by plotly's graph-objs code generator;
# changes here will be overwritten on regeneration.
class ColorBar(_BaseTraceHierarchyType):

    # class properties
    # --------------------
    # Dotted path of the parent node in the figure schema.
    _parent_path_str = "splom.marker"
    # Dotted path of this node in the figure schema.
    _path_str = "splom.marker.colorbar"
    # Names accepted by __setitem__ / the constructor for this node.
    _valid_props = {
        "bgcolor",
        "bordercolor",
        "borderwidth",
        "dtick",
        "exponentformat",
        "len",
        "lenmode",
        "minexponent",
        "nticks",
        "orientation",
        "outlinecolor",
        "outlinewidth",
        "separatethousands",
        "showexponent",
        "showticklabels",
        "showtickprefix",
        "showticksuffix",
        "thickness",
        "thicknessmode",
        "tick0",
        "tickangle",
        "tickcolor",
        "tickfont",
        "tickformat",
        "tickformatstopdefaults",
        "tickformatstops",
        "ticklabeloverflow",
        "ticklabelposition",
        "ticklabelstep",
        "ticklen",
        "tickmode",
        "tickprefix",
        "ticks",
        "ticksuffix",
        "ticktext",
        "ticktextsrc",
        "tickvals",
        "tickvalssrc",
        "tickwidth",
        "title",
        "titlefont",
        "titleside",
        "x",
        "xanchor",
        "xpad",
        "y",
        "yanchor",
        "ypad",
    }
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# orientation
# -----------
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.splom.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.splom.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.splom.marker.c
olorbar.tickformatstopdefaults), sets the default property
values to use for elements of
splom.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklabelstep
# -------------
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use splom.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
    """
    Deprecated alias for `splom.marker.colorbar.title.side`; determines
    the location of the color bar's title with respect to the color bar.
    Defaults to "top" when `orientation` is "v" and to "right" when
    `orientation` is "h".  One of 'right', 'top' or 'bottom'.

    Returns
    -------
    Any
    """
    return self["titleside"]

@titleside.setter
def titleside(self, val):
    self["titleside"] = val
# x
# -
@property
def x(self):
    """
    Horizontal position of the color bar, in plot fraction.  An int or
    float in the interval [-2, 3]; defaults to 1.02 when `orientation`
    is "v" and 0.5 when `orientation` is "h".

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
    """
    Horizontal position anchor: binds the `x` position to the "left",
    "center" or "right" of the color bar.  Defaults to "left" when
    `orientation` is "v" and "center" when `orientation` is "h".

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
    """
    Amount of padding (in px) along the x direction.  An int or float
    in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, val):
    self["xpad"] = val
# y
# -
@property
def y(self):
    """
    Vertical position of the color bar, in plot fraction.  An int or
    float in the interval [-2, 3]; defaults to 0.5 when `orientation`
    is "v" and 1.02 when `orientation` is "h".

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
    """
    Vertical position anchor: binds the `y` position to the "top",
    "middle" or "bottom" of the color bar.  Defaults to "middle" when
    `orientation` is "v" and "bottom" when `orientation` is "h".

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
    """
    Amount of padding (in px) along the y direction.  An int or float
    in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, val):
    self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text summary of every valid property.  Plotly's code
    # generator splices this string into the ColorBar constructor
    # docstring, so its content must stay in sync with _valid_props.
    return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.splom.marker.co
            lorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.splom.
            marker.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            splom.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.splom.marker.colorbar.Titl
            e` instance or dict with compatible properties
        titlefont
            Deprecated: Please use splom.marker.colorbar.title.font
            instead. Sets this color bar's title font. Note that
            the title's font used to be set by the now deprecated
            `titlefont` attribute.
        titleside
            Deprecated: Please use splom.marker.colorbar.title.side
            instead. Determines the location of color bar's title
            with respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        x
            Sets the x position of the color bar (in plot
            fraction). Defaults to 1.02 when `orientation` is "v"
            and 0.5 when `orientation` is "h".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction). Defaults to 0.5 when `orientation` is "v"
            and 1.02 when `orientation` is "h".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
# Deprecated flat attributes remapped onto the nested `title` object:
# reading/writing `titlefont` or `titleside` transparently touches
# `title.font` / `title.side`.
_mapped_properties = {
    "titlefont": ("title", "font"),
    "titleside": ("title", "side"),
}
def __init__(
    self,
    arg=None,
    bgcolor=None,
    bordercolor=None,
    borderwidth=None,
    dtick=None,
    exponentformat=None,
    len=None,
    lenmode=None,
    minexponent=None,
    nticks=None,
    orientation=None,
    outlinecolor=None,
    outlinewidth=None,
    separatethousands=None,
    showexponent=None,
    showticklabels=None,
    showtickprefix=None,
    showticksuffix=None,
    thickness=None,
    thicknessmode=None,
    tick0=None,
    tickangle=None,
    tickcolor=None,
    tickfont=None,
    tickformat=None,
    tickformatstops=None,
    tickformatstopdefaults=None,
    ticklabeloverflow=None,
    ticklabelposition=None,
    ticklabelstep=None,
    ticklen=None,
    tickmode=None,
    tickprefix=None,
    ticks=None,
    ticksuffix=None,
    ticktext=None,
    ticktextsrc=None,
    tickvals=None,
    tickvalssrc=None,
    tickwidth=None,
    title=None,
    titlefont=None,
    titleside=None,
    x=None,
    xanchor=None,
    xpad=None,
    y=None,
    yanchor=None,
    ypad=None,
    **kwargs,
):
    """
    Construct a new ColorBar object

    Every parameter is optional; see the corresponding class property
    docstrings (``bgcolor``, ``tickmode``, ``title``, ...) for the
    meaning and accepted values of each one.  ``titlefont`` and
    ``titleside`` are deprecated aliases for ``title.font`` and
    ``title.side``.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.splom.marker.ColorBar`

    Returns
    -------
    ColorBar
    """
    super(ColorBar, self).__init__("colorbar")

    # Internal fast path: when constructed by a parent object, only
    # record the parent and skip all validation.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can safely consume with
    # pop() (copying so the caller's dict is never mutated).
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.splom.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.ColorBar`"""
        )

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict.  For every property, an explicitly
    # passed keyword argument wins over the matching entry in `arg`;
    # None values are skipped so schema defaults remain in effect.
    # The pop order matches the declared parameter order.
    for _name, _kwarg in (
        ("bgcolor", bgcolor),
        ("bordercolor", bordercolor),
        ("borderwidth", borderwidth),
        ("dtick", dtick),
        ("exponentformat", exponentformat),
        ("len", len),
        ("lenmode", lenmode),
        ("minexponent", minexponent),
        ("nticks", nticks),
        ("orientation", orientation),
        ("outlinecolor", outlinecolor),
        ("outlinewidth", outlinewidth),
        ("separatethousands", separatethousands),
        ("showexponent", showexponent),
        ("showticklabels", showticklabels),
        ("showtickprefix", showtickprefix),
        ("showticksuffix", showticksuffix),
        ("thickness", thickness),
        ("thicknessmode", thicknessmode),
        ("tick0", tick0),
        ("tickangle", tickangle),
        ("tickcolor", tickcolor),
        ("tickfont", tickfont),
        ("tickformat", tickformat),
        ("tickformatstops", tickformatstops),
        ("tickformatstopdefaults", tickformatstopdefaults),
        ("ticklabeloverflow", ticklabeloverflow),
        ("ticklabelposition", ticklabelposition),
        ("ticklabelstep", ticklabelstep),
        ("ticklen", ticklen),
        ("tickmode", tickmode),
        ("tickprefix", tickprefix),
        ("ticks", ticks),
        ("ticksuffix", ticksuffix),
        ("ticktext", ticktext),
        ("ticktextsrc", ticktextsrc),
        ("tickvals", tickvals),
        ("tickvalssrc", tickvalssrc),
        ("tickwidth", tickwidth),
        ("title", title),
        ("titlefont", titlefont),
        ("titleside", titleside),
        ("x", x),
        ("xanchor", xanchor),
        ("xpad", xpad),
        ("y", y),
        ("yanchor", yanchor),
        ("ypad", ypad),
    ):
        _v = arg.pop(_name, None)
        _v = _kwarg if _kwarg is not None else _v
        if _v is not None:
            self[_name] = _v

    # Any leftover entries in `arg` plus remaining kwargs are handled
    # (and validated) by the base class.
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid so later assignments validate normally.
    self._skip_invalid = False
# --- file boundary: packages/python/plotly/plotly/graph_objs/image/legendgrouptitle/_font.py ---
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "image.legendgrouptitle"  # dotted path of the parent node
_path_str = "image.legendgrouptitle.font"  # full dotted path of this node
_valid_props = {"color", "family", "size"}  # property names this type accepts
# color
# -----
@property
def color(self):
    """
    Font color.  May be specified as a hex string (e.g. '#ff0000'),
    an rgb/rgba string (e.g. 'rgb(255,0,0)'), an hsl/hsla or
    hsv/hsva string, or any named CSS color (e.g. 'aliceblue',
    'steelblue', 'yellowgreen', ...).

    Returns
    -------
    str
    """
    return self["color"]

@color.setter
def color(self, val):
    self["color"] = val
# family
# ------
@property
def family(self):
    """
    HTML font family - the typeface applied by the web browser.  The
    browser can only apply a font available on the system where it
    runs; provide multiple comma-separated families to express a
    preference order.  Chart Studio Cloud image servers support a
    fixed set of fonts including "Arial", "Balto", "Courier New",
    "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
    "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
    "Raleway" and "Times New Roman".  Must be a non-empty string.

    Returns
    -------
    str
    """
    return self["family"]

@family.setter
def family(self, val):
    self["family"] = val
# size
# ----
@property
def size(self):
    """
    Font size.  An int or float in the interval [1, inf].

    Returns
    -------
    int|float
    """
    return self["size"]

@size.setter
def size(self, val):
    self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text summary of every valid property.  Plotly's code
    # generator splices this string into the Font constructor
    # docstring, so its content must stay in sync with _valid_props.
    return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.image.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.image.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.image.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit | 597c7e457d32961642b55ac70d323161 | 36.23348 | 82 | 0.559749 | 4.011391 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scatter3d/_error_z.py | 1 | 18691 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorZ(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d"
_path_str = "scatter3d.error_z"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
# array
# -----
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
Sets the stoke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both direction (top/bottom for vertical bars, left/right for
horizontal bars.
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
The 'traceref' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
*constant`, the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stoke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
*constant`, the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs,
):
"""
Construct a new ErrorZ object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.ErrorZ`
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stoke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
*constant`, the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorZ
"""
super(ErrorZ, self).__init__("error_z")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.ErrorZ
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.ErrorZ`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit | ce19ab4639be894791941e07d5611e32 | 30.048173 | 82 | 0.545824 | 4.363997 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/pie/hoverlabel/_font.py | 1 | 1856 | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="pie.hoverlabel", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
""",
),
**kwargs,
)
| mit | 232527628de815d35c63d2bc1d675906 | 39.347826 | 83 | 0.53125 | 4.758974 | false | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/layout/_shapes.py | 1 | 9626 | import _plotly_utils.basevalidators
class ShapesValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="shapes", parent_name="layout", **kwargs):
super(ShapesValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Shape"),
data_docs=kwargs.pop(
"data_docs",
"""
editable
Determines whether the shape could be activated
for edit or not. Has no effect when the older
editable shapes mode is enabled via
`config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior.
Only applies to closed shapes.
fillrule
Determines which regions of complex paths
constitute the interior. For more info please
visit https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
layer
Specifies whether shapes are drawn below or
above traces.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the
pixel values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and
taken unmodified as pixels relative to
`xanchor` and `yanchor` in case of "pixel" size
mode. There are a few restrictions / quirks
only absolute instructions, not relative. So
the allowed segments are: M, L, H, V, Q, C, T,
S, and Z arcs (A) are not allowed because
radius rx and ry are relative. In the future we
could consider supporting relative commands,
but we would have to decide on how to handle
date and log axes. Note that even as is, Q and
C Bezier paths that are smooth on linear axes
may not be smooth on log, and vice versa. no
chained "polybezier" commands - specify the
segment type for each one. On category axes,
values are numbers scaled to the serial numbers
of categories because using the categories
themselves there would be no way to describe
fractional positions On data axes: because
space and T are both normal components of path
strings, we can't use either to separate date
from time parts. Therefore we'll use underscore
for this purpose: 2015-02-21_13:45:56.789
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If
"line", a line is drawn from (`x0`,`y0`) to
(`x1`,`y1`) with respect to the axes' sizing
mode. If "circle", a circle is drawn from
((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2
-`y0`)|) with respect to the axes' sizing mode.
If "rect", a rectangle is drawn linking
(`x0`,`y0`), (`x1`,`y0`), (`x1`,`y1`),
(`x0`,`y1`), (`x0`,`y0`) with respect to the
axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes'
sizing mode.
visible
Determines whether or not this shape is
visible.
x0
Sets the shape's starting x position. See
`type` and `xsizemode` for more info.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
xanchor
Only relevant in conjunction with `xsizemode`
set to "pixel". Specifies the anchor point on
the x axis to which `x0`, `x1` and x
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `xsizemode`
not set to "pixel".
xref
Sets the shape's x coordinate axis. If set to a
x axis id (e.g. "x" or "x2"), the `x` position
refers to a x coordinate. If set to "paper",
the `x` position refers to the distance from
the left of the plotting area in normalized
coordinates where 0 (1) corresponds to the left
(right). If set to a x axis ID followed by
"domain" (separated by a space), the position
behaves like for "paper", but refers to the
distance in fractions of the domain length from
the left of the domain of that axis: e.g., *x2
domain* refers to the domain of the second x
axis and a x position of 0.5 refers to the
point between the left and the right of the
domain of the second x axis.
xsizemode
Sets the shapes's sizing mode along the x axis.
If set to "scaled", `x0`, `x1` and x
coordinates within `path` refer to data values
on the x axis or a fraction of the plot area's
width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in
terms of data or plot fraction but `x0`, `x1`
and x coordinates within `path` are pixels
relative to `xanchor`. This way, the shape can
have a fixed width while maintaining a position
relative to data or plot fraction.
y0
Sets the shape's starting y position. See
`type` and `ysizemode` for more info.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
yanchor
Only relevant in conjunction with `ysizemode`
set to "pixel". Specifies the anchor point on
the y axis to which `y0`, `y1` and y
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `ysizemode`
not set to "pixel".
yref
Sets the shape's y coordinate axis. If set to a
y axis id (e.g. "y" or "y2"), the `y` position
refers to a y coordinate. If set to "paper",
the `y` position refers to the distance from
the bottom of the plotting area in normalized
coordinates where 0 (1) corresponds to the
bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position
behaves like for "paper", but refers to the
distance in fractions of the domain length from
the bottom of the domain of that axis: e.g.,
*y2 domain* refers to the domain of the second
y axis and a y position of 0.5 refers to the
point between the bottom and the top of the
domain of the second y axis.
ysizemode
Sets the shapes's sizing mode along the y axis.
If set to "scaled", `y0`, `y1` and y
coordinates within `path` refer to data values
on the y axis or a fraction of the plot area's
height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in
terms of data or plot fraction but `y0`, `y1`
and y coordinates within `path` are pixels
relative to `yanchor`. This way, the shape can
have a fixed height while maintaining a
position relative to data or plot fraction.
""",
),
**kwargs,
)
| mit | d9361218568f86f201ac406a0bef6885 | 50.752688 | 77 | 0.536775 | 4.873924 | false | false | false | false |
alphagov/notifications-admin | tests/app/notify_client/test_broadcast_message_client.py | 1 | 3150 | from app.notify_client.broadcast_message_api_client import BroadcastMessageAPIClient
def test_create_broadcast_message(mocker):
client = BroadcastMessageAPIClient()
mocker.patch("app.notify_client.current_user", id="1")
mock_post = mocker.patch("app.notify_client.broadcast_message_api_client.BroadcastMessageAPIClient.post")
client.create_broadcast_message(
service_id="12345",
template_id="67890",
content=None,
reference=None,
)
mock_post.assert_called_once_with(
"/service/12345/broadcast-message",
data={
"service_id": "12345",
"template_id": "67890",
"personalisation": {},
"created_by": "1",
},
)
def test_get_broadcast_messages(mocker):
client = BroadcastMessageAPIClient()
mock_get = mocker.patch("app.notify_client.broadcast_message_api_client.BroadcastMessageAPIClient.get")
client.get_broadcast_messages("12345")
mock_get.assert_called_once_with(
"/service/12345/broadcast-message",
)
def test_get_broadcast_message(mocker):
client = BroadcastMessageAPIClient()
mocker.patch("app.notify_client.current_user", id="1")
mock_get = mocker.patch(
"app.notify_client.broadcast_message_api_client.BroadcastMessageAPIClient.get",
return_value={"abc": "def"},
)
mock_redis_set = mocker.patch("app.extensions.RedisClient.set")
client.get_broadcast_message(service_id="12345", broadcast_message_id="67890")
mock_get.assert_called_once_with(
"/service/12345/broadcast-message/67890",
)
mock_redis_set.assert_called_once_with(
"service-12345-broadcast-message-67890",
'{"abc": "def"}',
ex=604_800,
)
def test_update_broadcast_message(mocker):
client = BroadcastMessageAPIClient()
mocker.patch("app.notify_client.current_user", id="1")
mock_post = mocker.patch("app.notify_client.broadcast_message_api_client.BroadcastMessageAPIClient.post")
mock_redis_delete = mocker.patch("app.extensions.RedisClient.delete")
client.update_broadcast_message(
service_id="12345",
broadcast_message_id="67890",
data={"abc": "def"},
)
mock_post.assert_called_once_with(
"/service/12345/broadcast-message/67890",
data={"abc": "def"},
)
mock_redis_delete.assert_called_once_with("service-12345-broadcast-message-67890")
def test_update_broadcast_message_status(mocker):
client = BroadcastMessageAPIClient()
mocker.patch("app.notify_client.current_user", id="1")
mock_post = mocker.patch("app.notify_client.broadcast_message_api_client.BroadcastMessageAPIClient.post")
mock_redis_delete = mocker.patch("app.extensions.RedisClient.delete")
client.update_broadcast_message_status(
"cancelled",
service_id="12345",
broadcast_message_id="67890",
)
mock_post.assert_called_once_with(
"/service/12345/broadcast-message/67890/status",
data={"created_by": "1", "status": "cancelled"},
)
mock_redis_delete.assert_called_once_with("service-12345-broadcast-message-67890")
| mit | 8efae7b1a22574189f664d2c961fd2d0 | 36.5 | 109 | 0.673333 | 3.575482 | false | true | false | false |
alphagov/notifications-admin | app/models/job.py | 1 | 7279 | from datetime import timedelta
import pytz
from notifications_utils.letter_timings import (
CANCELLABLE_JOB_LETTER_STATUSES,
get_letter_timings,
letter_can_be_cancelled,
)
from notifications_utils.timezones import utc_string_to_aware_gmt_datetime
from werkzeug.utils import cached_property
from app.models import JSONModel, ModelList, PaginatedModelList
from app.notify_client.job_api_client import job_api_client
from app.notify_client.notification_api_client import notification_api_client
from app.notify_client.service_api_client import service_api_client
from app.utils import set_status_filters
from app.utils.letters import get_letter_printing_statement
from app.utils.time import is_less_than_days_ago
class Job(JSONModel):
ALLOWED_PROPERTIES = {
"id",
"service",
"template_name",
"template_version",
"original_file_name",
"created_at",
"notification_count",
"created_by",
"template_type",
"recipient",
}
__sort_attribute__ = "original_file_name"
@classmethod
def from_id(cls, job_id, service_id):
return cls(job_api_client.get_job(service_id, job_id)["data"])
@property
def status(self):
return self._dict.get("job_status")
@property
def cancelled(self):
return self.status == "cancelled"
@property
def scheduled(self):
return self.status == "scheduled"
@property
def scheduled_for(self):
return self._dict.get("scheduled_for")
@property
def upload_type(self):
return self._dict.get("upload_type")
@property
def pdf_letter(self):
return self.upload_type == "letter"
@property
def processing_started(self):
if not self._dict.get("processing_started"):
return None
return self._dict["processing_started"]
def _aggregate_statistics(self, *statuses):
return sum(
outcome["count"] for outcome in self._dict["statistics"] if not statuses or outcome["status"] in statuses
)
@property
def notifications_delivered(self):
return self._aggregate_statistics("delivered", "sent")
@property
def notifications_failed(self):
return self._aggregate_statistics(
"failed",
"technical-failure",
"temporary-failure",
"permanent-failure",
"cancelled",
)
@property
def notifications_requested(self):
return self._aggregate_statistics()
@property
def notifications_sent(self):
return self.notifications_delivered + self.notifications_failed
@property
def notifications_sending(self):
if self.scheduled:
return 0
return self.notification_count - self.notifications_sent
@property
def notifications_created(self):
return notification_api_client.get_notification_count_for_job_id(service_id=self.service, job_id=self.id)
@property
def still_processing(self):
return self.status != "finished" or self.percentage_complete < 100
@cached_property
def finished_processing(self):
return self.notification_count == self.notifications_sent
@property
def awaiting_processing_or_recently_processed(self):
if not self.processing_started:
# Assume that if processing hasn’t started yet then the job
# must have been created recently enough to not have any
# notifications yet
return True
return is_less_than_days_ago(self.processing_started, 1)
@property
def template_id(self):
return self._dict["template"]
@cached_property
def template(self):
return service_api_client.get_service_template(
service_id=self.service,
template_id=self.template_id,
version=self.template_version,
)["data"]
@property
def percentage_complete(self):
return self.notifications_requested / self.notification_count * 100
@property
def letter_job_can_be_cancelled(self):
if self.template["template_type"] != "letter":
return False
if any(self.uncancellable_notifications):
return False
if not letter_can_be_cancelled(
"created", utc_string_to_aware_gmt_datetime(self.created_at).replace(tzinfo=None)
):
return False
return True
    @property
    def letter_printing_statement(self):
        """Human-readable statement of when this letter-day upload will be printed.

        Raises TypeError for any other upload type.
        """
        if self.upload_type != "letter_day":
            raise TypeError()
        return get_letter_printing_statement(
            "created",
            # We have to make the time just before 5:30pm because a
            # letter uploaded at 5:30pm will be printed the next day
            (utc_string_to_aware_gmt_datetime(self.created_at) - timedelta(minutes=1)).astimezone(pytz.utc).isoformat(),
            long_form=False,
        )
    @cached_property
    def all_notifications(self):
        # Fetches every notification for the job, with default status filters applied.
        return self.get_notifications(set_status_filters({}))["notifications"]
    @property
    def uncancellable_notifications(self):
        # Lazy generator of letters already past a cancellable status.
        return (n for n in self.all_notifications if n["status"] not in CANCELLABLE_JOB_LETTER_STATUSES)
    @cached_property
    def postage(self):
        # There might be no notifications if the job has only just been
        # created and the tasks haven't run yet, so fall back to the
        # template's own postage.
        try:
            return self.all_notifications[0]["postage"]
        except IndexError:
            return self.template["postage"]
    @property
    def letter_timings(self):
        # Delivery timetable for letters in this job, based on creation time and postage.
        return get_letter_timings(self.created_at, postage=self.postage)
@property
def failure_rate(self):
if not self.notifications_delivered:
return 100 if self.notifications_failed else 0
return self.notifications_failed / (self.notifications_failed + self.notifications_delivered) * 100
    @property
    def high_failure_rate(self):
        # 30% is the threshold above which the UI warns about this job.
        return self.failure_rate > 30
    def get_notifications(self, status):
        # Raw API call: returns the full response dict, not just the notifications list.
        return notification_api_client.get_notifications_for_service(
            self.service,
            self.id,
            status=status,
        )
def cancel(self):
if self.template_type == "letter":
return job_api_client.cancel_letter_job(self.service, self.id)
else:
return job_api_client.cancel_job(self.service, self.id)
class ImmediateJobs(ModelList):
    """List of a service's non-scheduled jobs."""

    client_method = job_api_client.get_immediate_jobs
    model = Job
class ScheduledJobs(ImmediateJobs):
    """List of a service's scheduled (not-yet-processed) jobs."""

    client_method = job_api_client.get_scheduled_jobs
class PaginatedJobs(PaginatedModelList, ImmediateJobs):
    """One page of a service's jobs, optionally filtered by status subclasses."""

    client_method = job_api_client.get_page_of_jobs
    # Subclasses override this to restrict which job statuses are returned.
    statuses = None

    def __init__(self, service_id, *, contact_list_id=None, page=None, limit_days=None):
        super().__init__(
            service_id,
            contact_list_id=contact_list_id,
            statuses=self.statuses,
            page=page,
            limit_days=limit_days,
        )
class PaginatedJobsAndScheduledJobs(PaginatedJobs):
    """One page of jobs excluding cancelled ones."""

    statuses = job_api_client.NON_CANCELLED_JOB_STATUSES
class PaginatedUploads(PaginatedModelList, ImmediateJobs):
    """One page of a service's uploads (CSV jobs and uploaded letters)."""

    client_method = job_api_client.get_uploads
| mit | 8cb5639831297bf5ddf7caedd32d8c45 | 28.946502 | 120 | 0.646695 | 4.095104 | false | false | false | false |
alphagov/notifications-admin | app/notify_client/notification_api_client.py | 1 | 4310 | from app.notify_client import NotifyAdminAPIClient, _attach_current_user
class NotificationApiClient(NotifyAdminAPIClient):
    """Admin API client for reading, sending and cancelling notifications."""

    def get_notifications_for_service(
        self,
        service_id,
        job_id=None,
        template_type=None,
        status=None,
        page=None,
        page_size=None,
        count_pages=None,
        limit_days=None,
        include_jobs=None,
        include_from_test_key=None,
        format_for_csv=None,
        to=None,
        include_one_off=None,
    ):
        """Fetch notifications for a service, or for one job within it.

        Only filters the caller actually supplied are sent to the API.
        """
        params = {
            "page": page,
            "page_size": page_size,
            "template_type": template_type,
            "status": status,
            "include_jobs": include_jobs,
            "include_from_test_key": include_from_test_key,
            "format_for_csv": format_for_csv,
            "to": to,
            "include_one_off": include_one_off,
            "count_pages": count_pages,
        }
        params = {k: v for k, v in params.items() if v is not None}
        # if `to` is set it is likely PII like an email address or mobile which
        # we do not want in our logs, so we do a POST request instead of a GET
        method = self.post if to else self.get
        kwargs = {"data": params} if to else {"params": params}
        if job_id:
            return method(url="/service/{}/job/{}/notifications".format(service_id, job_id), **kwargs)
        else:
            # limit_days only applies to the service-wide listing, not a single job
            if limit_days is not None:
                params["limit_days"] = limit_days
            return method(url="/service/{}/notifications".format(service_id), **kwargs)

    def send_notification(self, service_id, *, template_id, recipient, personalisation, sender_id):
        """Send a one-off notification, tagged with the current user."""
        data = {
            "template_id": template_id,
            "to": recipient,
            "personalisation": personalisation,
        }
        if sender_id:
            data["sender_id"] = sender_id
        data = _attach_current_user(data)
        return self.post(url="/service/{}/send-notification".format(service_id), data=data)

    def send_precompiled_letter(self, service_id, filename, file_id, postage, recipient_address):
        """Send an uploaded (precompiled) letter PDF, tagged with the current user."""
        data = {"filename": filename, "file_id": file_id, "postage": postage, "recipient_address": recipient_address}
        data = _attach_current_user(data)
        return self.post(url="/service/{}/send-pdf-letter".format(service_id), data=data)

    def get_notification(self, service_id, notification_id):
        """Fetch a single notification."""
        return self.get(url="/service/{}/notifications/{}".format(service_id, notification_id))

    def get_api_notifications_for_service(self, service_id):
        """Fetch API-sent notifications only, with letter statuses simplified for display."""
        ret = self.get_notifications_for_service(
            service_id, include_jobs=False, include_from_test_key=True, include_one_off=False, count_pages=False
        )
        return self.map_letters_to_accepted(ret)

    @staticmethod
    def map_letters_to_accepted(notifications):
        # Collapse letter statuses to the two the UI shows: accepted / received.
        # Mutates (and returns) the response dict in place.
        for notification in notifications["notifications"]:
            if notification["notification_type"] == "letter":
                if notification["status"] in ("created", "sending"):
                    notification["status"] = "accepted"
                if notification["status"] in ("delivered", "returned-letter"):
                    notification["status"] = "received"
        return notifications

    def get_notification_letter_preview(self, service_id, notification_id, file_type, page=None):
        """Fetch a rendered preview of a letter notification (png or pdf)."""
        get_url = "/service/{}/template/preview/{}/{}{}".format(
            service_id, notification_id, file_type, "?page={}".format(page) if page else ""
        )
        return self.get(url=get_url)

    def update_notification_to_cancelled(self, service_id, notification_id):
        """Cancel a single (letter) notification."""
        return self.post(url="/service/{}/notifications/{}/cancel".format(service_id, notification_id), data={})

    def get_notification_status_by_service(self, start_date, end_date):
        """Platform-admin report: monthly notification counts per service."""
        return self.get(
            url="service/monthly-data-by-service",
            params={
                "start_date": str(start_date),
                "end_date": str(end_date),
            },
        )

    def get_notification_count_for_job_id(self, *, service_id, job_id):
        """Return the number of notifications created so far for a job."""
        return self.get(url="/service/{}/job/{}/notification_count".format(service_id, job_id))["count"]
# Shared singleton instance used throughout the admin app.
notification_api_client = NotificationApiClient()
| mit | 91499ee4e3a037591a612d06b3989420 | 38.907407 | 117 | 0.59884 | 3.950504 | false | false | false | false |
alphagov/notifications-admin | tests/app/main/views/test_broadcast.py | 1 | 89009 | import json
import uuid
from collections import namedtuple
from functools import partial
import pytest
from flask import url_for
from freezegun import freeze_time
from tests import broadcast_message_json, sample_uuid, user_json
from tests.app.broadcast_areas.custom_polygons import BRISTOL, SKYE
from tests.conftest import (
SERVICE_ONE_ID,
create_active_user_approve_broadcasts_permissions,
create_active_user_create_broadcasts_permissions,
create_active_user_view_permissions,
create_platform_admin_user,
normalize_spaces,
)
# NOTE(review): deliberately shadows the imported `sample_uuid` helper with its
# return value — every use below refers to the resulting fixed uuid string.
sample_uuid = sample_uuid()
@pytest.mark.parametrize(
    "endpoint, extra_args, expected_get_status, expected_post_status",
    (
        (
            ".broadcast_dashboard",
            {},
            403,
            405,
        ),
        (
            ".broadcast_dashboard_updates",
            {},
            403,
            405,
        ),
        (
            ".broadcast_dashboard_previous",
            {},
            403,
            405,
        ),
        (
            ".new_broadcast",
            {},
            403,
            403,
        ),
        (
            ".write_new_broadcast",
            {},
            403,
            403,
        ),
        (
            ".broadcast",
            {"template_id": sample_uuid},
            403,
            405,
        ),
        (
            ".preview_broadcast_areas",
            {"broadcast_message_id": sample_uuid},
            403,
            405,
        ),
        (
            ".choose_broadcast_library",
            {"broadcast_message_id": sample_uuid},
            403,
            405,
        ),
        (
            ".choose_broadcast_area",
            {"broadcast_message_id": sample_uuid, "library_slug": "countries"},
            403,
            403,
        ),
        (
            ".remove_broadcast_area",
            {"broadcast_message_id": sample_uuid, "area_slug": "countries-E92000001"},
            403,
            405,
        ),
        (
            ".preview_broadcast_message",
            {"broadcast_message_id": sample_uuid},
            403,
            403,
        ),
        (
            ".view_current_broadcast",
            {"broadcast_message_id": sample_uuid},
            403,
            403,
        ),
        (
            ".view_previous_broadcast",
            {"broadcast_message_id": sample_uuid},
            403,
            405,
        ),
        (
            ".cancel_broadcast_message",
            {"broadcast_message_id": sample_uuid},
            403,
            403,
        ),
    ),
)
def test_broadcast_pages_403_without_permission(
    client_request,
    endpoint,
    extra_args,
    expected_get_status,
    expected_post_status,
):
    """Broadcast pages are forbidden when the service lacks the 'broadcast' permission."""
    client_request.get(endpoint, service_id=SERVICE_ONE_ID, _expected_status=expected_get_status, **extra_args)
    client_request.post(endpoint, service_id=SERVICE_ONE_ID, _expected_status=expected_post_status, **extra_args)
@pytest.mark.parametrize("user_is_platform_admin", [True, False])
@pytest.mark.parametrize(
    "endpoint, extra_args, expected_get_status, expected_post_status",
    (
        (
            ".new_broadcast",
            {},
            403,
            403,
        ),
        (
            ".write_new_broadcast",
            {},
            403,
            403,
        ),
        (
            ".broadcast",
            {"template_id": sample_uuid},
            403,
            405,
        ),
        (
            ".preview_broadcast_areas",
            {"broadcast_message_id": sample_uuid},
            403,
            405,
        ),
        (
            ".choose_broadcast_library",
            {"broadcast_message_id": sample_uuid},
            403,
            405,
        ),
        (
            ".choose_broadcast_area",
            {"broadcast_message_id": sample_uuid, "library_slug": "countries"},
            403,
            403,
        ),
        (
            ".remove_broadcast_area",
            {"broadcast_message_id": sample_uuid, "area_slug": "england"},
            403,
            405,
        ),
        (
            ".preview_broadcast_message",
            {"broadcast_message_id": sample_uuid},
            403,
            403,
        ),
    ),
)
def test_broadcast_pages_403_for_user_without_permission(
    client_request,
    service_one,
    active_user_view_permissions,
    platform_admin_user_no_service_permissions,
    endpoint,
    extra_args,
    expected_get_status,
    expected_post_status,
    user_is_platform_admin,
):
    """
    Checks that users without permissions, including admin users, cannot create or edit broadcasts.
    """
    service_one["permissions"] += ["broadcast"]
    if user_is_platform_admin:
        client_request.login(platform_admin_user_no_service_permissions)
    else:
        client_request.login(active_user_view_permissions)
    client_request.get(endpoint, service_id=SERVICE_ONE_ID, _expected_status=expected_get_status, **extra_args)
    client_request.post(endpoint, service_id=SERVICE_ONE_ID, _expected_status=expected_post_status, **extra_args)
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_view_permissions(),
        create_platform_admin_user(),
        create_active_user_create_broadcasts_permissions(),
    ],
)
def test_user_cannot_accept_broadcast_without_permission(
    client_request,
    service_one,
    user,
):
    """Only users with the approve-broadcasts permission may approve a broadcast."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)

    client_request.post(
        ".approve_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=sample_uuid,
        _expected_status=403,
    )
@pytest.mark.parametrize("user_is_platform_admin", [True, False])
def test_user_cannot_reject_broadcast_without_permission(
    client_request,
    service_one,
    active_user_view_permissions,
    platform_admin_user_no_service_permissions,
    user_is_platform_admin,
):
    """Neither view-only users nor platform admins without service permissions can reject."""
    service_one["permissions"] += ["broadcast"]
    if user_is_platform_admin:
        client_request.login(platform_admin_user_no_service_permissions)
    else:
        client_request.login(active_user_view_permissions)

    client_request.get(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=sample_uuid,
        _expected_status=403,
    )
def test_user_cannot_cancel_broadcast_without_permission(
    client_request,
    service_one,
    active_user_view_permissions,
):
    """
    separate test for cancel_broadcast endpoint, because admin users are allowed to cancel broadcasts
    """
    service_one["permissions"] += ["broadcast"]

    client_request.get(
        ".cancel_broadcast_message",
        service_id=SERVICE_ONE_ID,
        _expected_status=403,
        **{"broadcast_message_id": sample_uuid},
    )
    client_request.post(
        ".cancel_broadcast_message",
        service_id=SERVICE_ONE_ID,
        _expected_status=403,
        **{"broadcast_message_id": sample_uuid},
    )
@pytest.mark.parametrize(
    "endpoint, step_index, expected_link_text, expected_link_href",
    (
        (".broadcast_tour", 1, "Continue", partial(url_for, ".broadcast_tour", step_index=2)),
        (".broadcast_tour", 2, "Continue", partial(url_for, ".broadcast_tour", step_index=3)),
        (".broadcast_tour", 3, "Continue", partial(url_for, ".broadcast_tour", step_index=4)),
        (".broadcast_tour", 4, "Continue", partial(url_for, ".broadcast_tour", step_index=5)),
        (".broadcast_tour", 5, "Continue", partial(url_for, ".service_dashboard")),
        (".broadcast_tour", 6, "Continue", partial(url_for, ".service_dashboard")),
        (".broadcast_tour_live", 1, "Continue", partial(url_for, ".broadcast_tour_live", step_index=2)),
        (".broadcast_tour_live", 2, "Continue", partial(url_for, ".service_dashboard")),
    ),
)
def test_broadcast_tour_pages_have_continue_link(
    client_request,
    service_one,
    endpoint,
    step_index,
    expected_link_text,
    expected_link_href,
):
    """Every tour step's banner links to the next step (or back to the dashboard at the end)."""
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
        step_index=step_index,
    )
    link = page.select_one(".banner-tour a")
    assert normalize_spaces(link.text) == expected_link_text
    assert link["href"] == expected_link_href(service_id=SERVICE_ONE_ID)
@pytest.mark.parametrize(
    "endpoint, step_index",
    (
        pytest.param(".broadcast_tour", 1, marks=pytest.mark.xfail),
        pytest.param(".broadcast_tour", 2, marks=pytest.mark.xfail),
        pytest.param(".broadcast_tour", 3, marks=pytest.mark.xfail),
        pytest.param(".broadcast_tour", 4, marks=pytest.mark.xfail),
        (".broadcast_tour", 5),
        (".broadcast_tour", 6),
        (".broadcast_tour_live", 1),
        (".broadcast_tour_live", 2),
    ),
)
def test_some_broadcast_tour_pages_show_service_name(
    client_request,
    service_one,
    endpoint,
    step_index,
):
    """Later tour steps show the service navigation; early steps are expected not to (xfail)."""
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
        step_index=step_index,
    )
    assert normalize_spaces(page.select_one(".navigation-service").text).startswith("service one Training")
@pytest.mark.parametrize(
    "trial_mode, channel, allowed_broadcast_provider, selector, expected_text, expected_tagged_text",
    (
        (
            True,
            None,
            "all",
            ".navigation-service-type.navigation-service-type--training",
            "service one Training Switch service",
            "Training",
        ),
        (
            True,
            "test",
            "all",
            ".navigation-service-type.navigation-service-type--training",
            "service one Training Switch service",
            "Training",
        ),
        (
            False,
            "severe",
            "all",
            ".navigation-service-type.navigation-service-type--live",
            "service one Live Switch service",
            "Live",
        ),
        (
            False,
            "operator",
            "all",
            ".navigation-service-type.navigation-service-type--operator",
            "service one Operator Switch service",
            "Operator",
        ),
        (
            False,
            "operator",
            "vodafone",
            ".navigation-service-type.navigation-service-type--operator",
            "service one Operator (Vodafone) Switch service",
            "Operator (Vodafone)",
        ),
        (
            False,
            "test",
            "all",
            ".navigation-service-type.navigation-service-type--test",
            "service one Test Switch service",
            "Test",
        ),
        (
            False,
            "test",
            "vodafone",
            ".navigation-service-type.navigation-service-type--test",
            "service one Test (Vodafone) Switch service",
            "Test (Vodafone)",
        ),
        (
            False,
            "government",
            "all",
            ".navigation-service-type.navigation-service-type--government",
            "service one Government Switch service",
            "Government",
        ),
        (
            False,
            "government",
            "vodafone",
            ".navigation-service-type.navigation-service-type--government",
            "service one Government (Vodafone) Switch service",
            "Government (Vodafone)",
        ),
        (
            False,
            "severe",
            "vodafone",
            ".navigation-service-type.navigation-service-type--live",
            "service one Live (Vodafone) Switch service",
            "Live (Vodafone)",
        ),
    ),
)
def test_broadcast_service_shows_channel_settings(
    client_request,
    service_one,
    mock_get_no_broadcast_messages,
    trial_mode,
    allowed_broadcast_provider,
    channel,
    selector,
    expected_text,
    expected_tagged_text,
):
    """The service navigation tags the service with its broadcast channel and provider."""
    service_one["allowed_broadcast_provider"] = allowed_broadcast_provider
    service_one["permissions"] += ["broadcast"]
    service_one["restricted"] = trial_mode
    service_one["broadcast_channel"] = channel
    page = client_request.get(
        ".broadcast_dashboard",
        service_id=SERVICE_ONE_ID,
    )
    assert normalize_spaces(page.select_one(".navigation-service").text) == (expected_text)
    assert normalize_spaces(page.select_one(".navigation-service").select_one(selector).text) == (expected_tagged_text)
@pytest.mark.parametrize(
    "endpoint, step_index",
    (
        (".broadcast_tour", 0),
        (".broadcast_tour", 7),
        (".broadcast_tour_live", 0),
        (".broadcast_tour_live", 3),
    ),
)
def test_broadcast_tour_page_404s_out_of_range(
    client_request,
    service_one,
    endpoint,
    step_index,
):
    """Tour step indexes outside the valid range return 404."""
    service_one["permissions"] += ["broadcast"]
    client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
        step_index=step_index,
        _expected_status=404,
    )
def test_dashboard_redirects_to_broadcast_dashboard(
    client_request,
    service_one,
):
    """Broadcast services get the broadcast dashboard instead of the normal one."""
    service_one["permissions"] += ["broadcast"]
    # Fixed: a stray trailing comma previously wrapped this call in a 1-tuple
    # expression statement.
    client_request.get(
        ".service_dashboard",
        service_id=SERVICE_ONE_ID,
        _expected_redirect=url_for(
            ".broadcast_dashboard",
            service_id=SERVICE_ONE_ID,
        ),
    )
def test_empty_broadcast_dashboard(
    client_request,
    service_one,
    mock_get_no_broadcast_messages,
):
    """The dashboard shows an empty-state message when there are no current alerts."""
    service_one["permissions"] += ["broadcast"]

    page = client_request.get(".broadcast_dashboard", service_id=SERVICE_ONE_ID)

    assert normalize_spaces(page.select_one("h1").text) == "Current alerts"
    empty_messages = [normalize_spaces(row.text) for row in page.select(".table-empty-message")]
    assert empty_messages == ["You do not have any current alerts"]
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_approve_broadcasts_permissions(),
        create_active_user_create_broadcasts_permissions(),
    ],
)
@freeze_time("2020-02-20 02:20")
def test_broadcast_dashboard(
    client_request,
    service_one,
    mock_get_broadcast_messages,
    user,
):
    """The dashboard lists pending and live broadcasts in one container, newest first."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)
    page = client_request.get(
        ".broadcast_dashboard",
        service_id=SERVICE_ONE_ID,
    )

    assert len(page.select(".ajax-block-container")) == len(page.select("h1")) == 1

    assert [normalize_spaces(row.text) for row in page.select(".ajax-block-container")[0].select(".file-list")] == [
        "Half an hour ago This is a test Waiting for approval England Scotland",
        "Hour and a half ago This is a test Waiting for approval England Scotland",
        "Example template This is a test Live since today at 2:20am England Scotland",
        "Example template This is a test Live since today at 1:20am England Scotland",
    ]
@pytest.mark.parametrize(
    "user",
    [
        create_platform_admin_user(),
        create_active_user_view_permissions(),
        create_active_user_approve_broadcasts_permissions(),
    ],
)
@pytest.mark.parametrize(
    "endpoint",
    (
        ".broadcast_dashboard",
        ".broadcast_dashboard_previous",
        ".broadcast_dashboard_rejected",
    ),
)
def test_broadcast_dashboard_does_not_have_button_if_user_does_not_have_permission_to_create_broadcast(
    client_request,
    service_one,
    mock_get_broadcast_messages,
    endpoint,
    user,
):
    """No 'New alert' button for users lacking the create-broadcasts permission."""
    client_request.login(user)
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
    )
    assert not page.select("a.govuk-button")
@pytest.mark.parametrize(
    "endpoint",
    (
        ".broadcast_dashboard",
        ".broadcast_dashboard_previous",
        ".broadcast_dashboard_rejected",
    ),
)
def test_broadcast_dashboard_has_new_alert_button_if_user_has_permission_to_create_broadcasts(
    client_request,
    service_one,
    mock_get_broadcast_messages,
    active_user_create_broadcasts_permission,
    endpoint,
):
    """Users with create-broadcasts permission see a 'New alert' button on every dashboard tab."""
    client_request.login(active_user_create_broadcasts_permission)
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
    )
    button = page.select_one(".js-stick-at-bottom-when-scrolling a.govuk-button.govuk-button--secondary")
    assert normalize_spaces(button.text) == "New alert"
    assert button["href"] == url_for(
        "main.new_broadcast",
        service_id=SERVICE_ONE_ID,
    )
@freeze_time("2020-02-20 02:20")
def test_broadcast_dashboard_json(
    client_request,
    service_one,
    mock_get_broadcast_messages,
):
    """The AJAX updates endpoint returns current broadcasts as rendered HTML in JSON."""
    service_one["permissions"] += ["broadcast"]

    response = client_request.get_response(
        ".broadcast_dashboard_updates",
        service_id=SERVICE_ONE_ID,
    )
    payload = json.loads(response.get_data(as_text=True))

    assert payload.keys() == {"current_broadcasts"}
    rendered = payload["current_broadcasts"]
    assert "Waiting for approval" in rendered
    assert "Live since today at 2:20am" in rendered
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_approve_broadcasts_permissions(),
        create_active_user_create_broadcasts_permissions(),
    ],
)
@freeze_time("2020-02-20 02:20")
def test_previous_broadcasts_page(
    client_request,
    service_one,
    mock_get_broadcast_messages,
    user,
):
    """The 'Past alerts' tab lists completed broadcasts."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)
    page = client_request.get(
        ".broadcast_dashboard_previous",
        service_id=SERVICE_ONE_ID,
    )

    assert normalize_spaces(page.select_one("main h1").text) == ("Past alerts")
    assert len(page.select(".ajax-block-container")) == 1
    assert [normalize_spaces(row.text) for row in page.select(".ajax-block-container")[0].select(".file-list")] == [
        "Example template This is a test Yesterday at 2:20pm England Scotland",
        "Example template This is a test Yesterday at 2:20am England Scotland",
    ]
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_approve_broadcasts_permissions(),
        create_active_user_create_broadcasts_permissions(),
    ],
)
@freeze_time("2020-02-20 02:20")
def test_rejected_broadcasts_page(
    client_request,
    service_one,
    mock_get_broadcast_messages,
    user,
):
    """The 'Rejected alerts' tab lists rejected broadcasts."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)
    page = client_request.get(
        ".broadcast_dashboard_rejected",
        service_id=SERVICE_ONE_ID,
    )

    assert normalize_spaces(page.select_one("main h1").text) == ("Rejected alerts")
    assert len(page.select(".ajax-block-container")) == 1
    assert [normalize_spaces(row.text) for row in page.select(".ajax-block-container")[0].select(".file-list")] == [
        "Example template This is a test Today at 1:20am England Scotland",
    ]
def test_new_broadcast_page(
    client_request,
    service_one,
    active_user_create_broadcasts_permission,
):
    """The 'New alert' page offers a choice between freeform and template content."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".new_broadcast",
        service_id=SERVICE_ONE_ID,
    )

    assert normalize_spaces(page.select_one("h1").text) == "New alert"

    form = page.select_one("form")
    # The form posts back to the same URL.
    assert form["method"] == "post"
    assert "action" not in form

    assert [
        (
            choice.select_one("input")["name"],
            choice.select_one("input")["value"],
            normalize_spaces(choice.select_one("label").text),
        )
        for choice in form.select(".govuk-radios__item")
    ] == [
        ("content", "freeform", "Write your own message"),
        ("content", "template", "Use a template"),
    ]
@pytest.mark.parametrize(
    "value, expected_redirect_endpoint",
    (
        ("freeform", "main.write_new_broadcast"),
        ("template", "main.choose_template"),
    ),
)
def test_new_broadcast_page_redirects(
    client_request,
    service_one,
    active_user_create_broadcasts_permission,
    value,
    expected_redirect_endpoint,
):
    """Choosing a content type redirects to the matching next page."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)

    expected_redirect = url_for(expected_redirect_endpoint, service_id=SERVICE_ONE_ID)
    client_request.post(
        ".new_broadcast",
        service_id=SERVICE_ONE_ID,
        _data={"content": value},
        _expected_redirect=expected_redirect,
    )
def test_write_new_broadcast_page(
    client_request,
    service_one,
    active_user_create_broadcasts_permission,
):
    """The freeform alert page has a reference field and a live character-count textarea."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".write_new_broadcast",
        service_id=SERVICE_ONE_ID,
    )

    assert normalize_spaces(page.select_one("h1").text) == "New alert"

    form = page.select_one("form")
    assert form["method"] == "post"
    assert "action" not in form

    assert normalize_spaces(page.select_one("label[for=name]").text) == "Reference"
    assert page.select_one("input[type=text]")["name"] == "name"

    assert normalize_spaces(page.select_one("label[for=template_content]").text) == "Message"
    assert page.select_one("textarea")["name"] == "template_content"
    # Enhanced textbox without placeholder highlighting (no personalisation in broadcasts).
    assert page.select_one("textarea")["data-notify-module"] == "enhanced-textbox"
    assert page.select_one("textarea")["data-highlight-placeholders"] == "false"

    # The character-count status updates live from the content-length endpoint.
    assert (page.select_one("[data-notify-module=update-status]")["data-updates-url"]) == url_for(
        ".count_content_length",
        service_id=SERVICE_ONE_ID,
        template_type="broadcast",
    )

    assert (
        (page.select_one("[data-notify-module=update-status]")["data-target"])
        == (page.select_one("textarea")["id"])
        == ("template_content")
    )

    assert (page.select_one("[data-notify-module=update-status]")["aria-live"]) == ("polite")
def test_write_new_broadcast_posts(
    client_request,
    service_one,
    mock_create_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """Posting the freeform alert form creates the message and moves on to area selection."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)

    form_data = {
        "name": "My new alert",
        "template_content": "This is a test",
    }
    choose_library_url = url_for(
        ".choose_broadcast_library",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    client_request.post(
        ".write_new_broadcast",
        service_id=SERVICE_ONE_ID,
        _data=form_data,
        _expected_redirect=choose_library_url,
    )

    mock_create_broadcast_message.assert_called_once_with(
        service_id=SERVICE_ONE_ID,
        reference="My new alert",
        content="This is a test",
        template_id=None,
    )
@pytest.mark.parametrize(
    "content, expected_error_message",
    (
        ("", "Cannot be empty"),
        ("ŵ" * 616, "Content must be 615 characters or fewer because it contains ŵ"),
        ("w" * 1_396, "Content must be 1,395 characters or fewer"),
        ("hello ((name))", "You can’t use ((double brackets)) to personalise this message"),
    ),
)
def test_write_new_broadcast_bad_content(
    client_request,
    service_one,
    mock_create_broadcast_message,
    active_user_create_broadcasts_permission,
    content,
    expected_error_message,
):
    """Invalid content re-renders the form with an error and creates nothing."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.post(
        ".write_new_broadcast",
        service_id=SERVICE_ONE_ID,
        _data={
            "name": "My new alert",
            "template_content": content,
        },
        _expected_status=200,
    )
    assert normalize_spaces(page.select_one(".error-message").text) == (expected_error_message)
    assert mock_create_broadcast_message.called is False
def test_broadcast_page(
    client_request,
    service_one,
    fake_uuid,
    mock_create_broadcast_message,
    active_user_create_broadcasts_permission,
):
    """Starting a broadcast from a template redirects to area selection."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    # Fixed: a stray trailing comma previously wrapped this call in a 1-tuple
    # expression statement.
    client_request.get(
        ".broadcast",
        service_id=SERVICE_ONE_ID,
        template_id=fake_uuid,
        _expected_redirect=url_for(
            ".choose_broadcast_library",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        ),
    )
@pytest.mark.parametrize(
    "areas_selected, areas_listed, estimates",
    (
        (
            [
                "ctry19-E92000001",
                "ctry19-S92000003",
            ],
            [
                "England Remove England",
                "Scotland Remove Scotland",
            ],
            [
                "An area of 100,000 square miles Will get the alert",
                "An extra area of 6,000 square miles is Likely to get the alert",
                "40,000,000 phones estimated",
            ],
        ),
        (
            [
                "wd21-E05003224",
                "wd21-E05003225",
                "wd21-E05003227",
                "wd21-E05003228",
                "wd21-E05003229",
            ],
            [
                "Penrith Carleton Remove Penrith Carleton",
                "Penrith East Remove Penrith East",
                "Penrith Pategill Remove Penrith Pategill",
                "Penrith South Remove Penrith South",
                "Penrith West Remove Penrith West",
            ],
            [
                "An area of 4 square miles Will get the alert",
                "An extra area of 10 square miles is Likely to get the alert",
                "9,000 to 10,000 phones",
            ],
        ),
        (
            [
                "lad21-E09000019",
            ],
            [
                "Islington Remove Islington",
            ],
            [
                "An area of 6 square miles Will get the alert",
                "An extra area of 4 square miles is Likely to get the alert",
                "200,000 to 600,000 phones",
            ],
        ),
        (
            [
                "ctyua21-E10000019",
            ],
            [
                "Lincolnshire Remove Lincolnshire",
            ],
            [
                "An area of 2,000 square miles Will get the alert",
                "An extra area of 500 square miles is Likely to get the alert",
                "500,000 to 600,000 phones",
            ],
        ),
        (
            ["ctyua21-E10000019", "ctyua21-E10000023"],
            [
                "Lincolnshire Remove Lincolnshire",
                "North Yorkshire Remove North Yorkshire",
            ],
            [
                "An area of 6,000 square miles Will get the alert",
                "An extra area of 1,000 square miles is Likely to get the alert",
                "1,000,000 phones estimated",
            ],
        ),
        (
            [
                "pfa20-E23000035",
            ],
            [
                "Devon & Cornwall Remove Devon & Cornwall",
            ],
            [
                "An area of 4,000 square miles Will get the alert",
                "An extra area of 800 square miles is Likely to get the alert",
                "1,000,000 phones estimated",
            ],
        ),
        (
            [
                "pfa20-LONDON",
            ],
            [
                "London (Metropolitan & City of London) Remove London (Metropolitan & City of London)",
            ],
            [
                "An area of 600 square miles Will get the alert",
                "An extra area of 70 square miles is Likely to get the alert",
                "6,000,000 phones estimated",
            ],
        ),
    ),
)
def test_preview_broadcast_areas_page(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    areas_selected,
    areas_listed,
    estimates,
    active_user_create_broadcasts_permission,
):
    """The area preview lists selected areas with a map and coverage/phone estimates."""
    service_one["permissions"] += ["broadcast"]
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            service_id=SERVICE_ONE_ID,
            status="draft",
            area_ids=areas_selected,
        ),
    )
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".preview_broadcast_areas",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    assert [normalize_spaces(item.text) for item in page.select("ul.area-list li.area-list-item")] == areas_listed

    assert len(page.select("#area-list-map")) == 1

    assert [normalize_spaces(item.text) for item in page.select(".area-list-key")] == estimates
@pytest.mark.parametrize(
    "polygons, expected_list_items",
    (
        (
            [
                [[1, 2], [3, 4], [5, 6]],
                [[7, 8], [9, 10], [11, 12]],
            ],
            [
                "An area of 800 square miles Will get the alert",
                "An extra area of 2,000 square miles is Likely to get the alert",
                "Unknown number of phones",
            ],
        ),
        (
            [BRISTOL],
            [
                "An area of 4 square miles Will get the alert",
                "An extra area of 3 square miles is Likely to get the alert",
                "70,000 to 100,000 phones",
            ],
        ),
        (
            [SKYE],
            [
                "An area of 2,000 square miles Will get the alert",
                "An extra area of 600 square miles is Likely to get the alert",
                "3,000 to 4,000 phones",
            ],
        ),
    ),
)
def test_preview_broadcast_areas_page_with_custom_polygons(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    polygons,
    expected_list_items,
    active_user_create_broadcasts_permission,
):
    """Custom-drawn polygon areas also render a map plus coverage/phone estimates."""
    service_one["permissions"] += ["broadcast"]
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            service_id=SERVICE_ONE_ID,
            status="draft",
            areas={
                "names": ["Area one", "Area two", "Area three"],
                "simple_polygons": polygons,
            },
        ),
    )
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".preview_broadcast_areas",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    assert [normalize_spaces(item.text) for item in page.select("ul.area-list li.area-list-item")] == [
        "Area one Remove Area one",
        "Area two Remove Area two",
        "Area three Remove Area three",
    ]

    assert len(page.select("#area-list-map")) == 1

    assert [normalize_spaces(item.text) for item in page.select(".area-list-key")] == expected_list_items
@pytest.mark.parametrize(
    "area_ids, expected_list",
    (
        (
            [],
            [
                "Countries",
                "Local authorities",
                "Police forces in England and Wales",
                "Test areas",
            ],
        ),
        (
            [
                # Countries have no parent areas
                "ctry19-E92000001",
                "ctry19-S92000003",
            ],
            [
                "Countries",
                "Local authorities",
                "Police forces in England and Wales",
                "Test areas",
            ],
        ),
        (
            [
                # If you’ve chosen the whole of a county or unitary authority
                # there’s no reason to also pick districts of it
                "ctyua21-E10000013",  # Gloucestershire, a county
                "lad21-E06000052",  # Cornwall, a unitary authority
            ],
            [
                "Countries",
                "Local authorities",
                "Police forces in England and Wales",
                "Test areas",
            ],
        ),
        (
            [
                "wd21-E05004299",  # Pitville, in Cheltenham, in Gloucestershire
                "wd21-E05004290",  # Benhall and the Reddings, in Cheltenham, in Gloucestershire
                "wd21-E05010951",  # Abbeymead, in Gloucester, in Gloucestershire
                "wd21-S13002775",  # Shetland Central, in Shetland Isles
                "lad21-E07000037",  # High Peak, a district in Derbyshire
            ],
            [
                "Cheltenham",
                "Derbyshire",
                "Gloucester",
                "Gloucestershire",
                "Shetland Islands",
                # ---
                "Countries",
                "Local authorities",
                "Police forces in England and Wales",
                "Test areas",
            ],
        ),
    ),
)
def test_choose_broadcast_library_page(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_create_broadcasts_permission,
    area_ids,
    expected_list,
    area_slug="lad21-E07000078",
    )
def test_suggested_area_has_correct_link(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """A draft covering a single ward gets its parent local authority
    suggested, linking through to that authority's sub-area page."""
    service_one["permissions"] += ["broadcast"]
    # Pitville is a ward of Cheltenham, so Cheltenham should be suggested
    chosen_ward_ids = ["wd21-E05004299"]
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            service_id=SERVICE_ONE_ID,
            status="draft",
            area_ids=chosen_ward_ids,
        ),
    )
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".choose_broadcast_library",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    suggested_link = page.select_one("main a.govuk-link")
    expected_href = url_for(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-E07000078",
    )
    assert suggested_link.text == "Cheltenham"
    assert suggested_link["href"] == expected_href
@pytest.mark.parametrize(
    "library_slug, expected_page_title",
    (
        (
            "ctry19",
            "Choose countries",
        ),
        ("wd21-lad21-ctyua21", "Choose a local authority"),
        ("pfa20", "Choose police forces in England and Wales"),
        (
            "test",
            "Choose test areas",
        ),
    ),
)
def test_choose_broadcast_area_page_titles(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
    library_slug,
    expected_page_title,
):
    """Each area library renders its own heading on the choose-area page."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".choose_broadcast_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug=library_slug,
    )
    assert normalize_spaces(page.select_one("h1").text) == expected_page_title
def test_choose_broadcast_area_page(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """The countries library page shows one checkbox per country, in order."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".choose_broadcast_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="ctry19",
    )
    # Pair each checkbox's submitted value with its visible label text
    values_and_labels = []
    for item in page.select("form[method=post] .govuk-checkboxes__item"):
        submitted_value = item.select_one("input")["value"]
        label_text = normalize_spaces(item.select_one("label").text)
        values_and_labels.append((submitted_value, label_text))
    assert values_and_labels == [
        ("ctry19-E92000001", "England"),
        ("ctry19-N92000002", "Northern Ireland"),
        ("ctry19-S92000003", "Scotland"),
        ("ctry19-W92000004", "Wales"),
    ]
def test_choose_broadcast_area_page_for_area_with_sub_areas(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """The local-authority library lists all 398 authorities as links (not
    checkboxes), alphabetically across all four UK nations, with live search.
    Spot-checks entries at fixed indexes, so the full dataset is pinned.
    """
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".choose_broadcast_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
    )
    assert normalize_spaces(page.select_one("h1").text) == ("Choose a local authority")
    live_search = page.select_one("[data-notify-module=live-search]")
    assert live_search["data-targets"] == ".file-list-item"
    assert live_search.select_one("input")["type"] == "search"
    # All links point at the sub-area page; only area_slug varies per authority
    partial_url_for = partial(
        url_for,
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
    )
    choices = [
        (
            choice.select_one("a.file-list-filename-large")["href"],
            normalize_spaces(choice.text),
        )
        for choice in page.select(".file-list-item")
    ]
    assert len(choices) == 398
    # First item, somewhere in Scotland
    assert choices[0] == (
        partial_url_for(area_slug="lad21-S12000033"),
        "Aberdeen City",
    )
    # Somewhere in England
    # ---
    # Note: we don't populate prev_area_slug query param, so the back link will come here rather than to a county page,
    # even though ashford belongs to kent
    assert choices[12] == (
        partial_url_for(area_slug="lad21-E07000105"),
        "Ashford",
    )
    # Somewhere in Wales
    assert choices[219] == (
        partial_url_for(area_slug="lad21-W06000021"),
        "Monmouthshire",
    )
    # Somewhere in Northern Ireland
    assert choices[95] == (
        partial_url_for(area_slug="lad21-N09000005"),
        "Derry City and Strabane",
    )
    # Last item on the page
    assert choices[-1] == (
        partial_url_for(area_slug="lad21-E06000014"),
        "York",
    )
def test_choose_broadcast_sub_area_page_for_district_shows_checkboxes_for_wards(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """Drilling into a district shows a 'select all' checkbox followed by
    one checkbox per ward, the ward ones grouped under #sub-areas."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-S12000033",
    )
    assert normalize_spaces(page.select_one("h1").text) == ("Choose an area of Aberdeen City")
    live_search = page.select_one("[data-notify-module=live-search]")
    assert live_search["data-targets"] == "#sub-areas .govuk-checkboxes__item"
    assert live_search.select_one("input")["type"] == "search"

    def checkbox_values(selector):
        # Pair each checkbox's submitted value with its visible label text
        return [
            (item.select_one("input")["value"], normalize_spaces(item.select_one("label").text))
            for item in page.select(selector)
        ]

    all_choices = checkbox_values("form[method=post] .govuk-checkboxes__item")
    sub_choices = checkbox_values("form[method=post] #sub-areas .govuk-checkboxes__item")
    assert all_choices[:3] == [
        ("y", "All of Aberdeen City"),
        ("wd21-S13002845", "Airyhall/Broomhill/Garthdee"),
        ("wd21-S13002836", "Bridge of Don"),
    ]
    assert sub_choices[:3] == [
        ("wd21-S13002845", "Airyhall/Broomhill/Garthdee"),
        ("wd21-S13002836", "Bridge of Don"),
        ("wd21-S13002835", "Dyce/Bucksburn/Danestone"),
    ]
    # Both lists end with the same final ward
    assert all_choices[-1:] == sub_choices[-1:] == [("wd21-S13002846", "Torry/Ferryhill")]
@pytest.mark.parametrize(
    "prev_area_slug, expected_back_link_url, expected_back_link_extra_kwargs",
    [
        ("ctyua21-E10000016", "main.choose_broadcast_sub_area", {"area_slug": "ctyua21-E10000016"}),  # Kent
        (None, ".choose_broadcast_area", {}),
    ],
)
def test_choose_broadcast_sub_area_page_for_district_has_back_link(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    active_user_create_broadcasts_permission,
    prev_area_slug,
    expected_back_link_url,
    expected_back_link_extra_kwargs,
):
    """The back link goes to the parent county when `prev_area_slug` is in
    the query string, otherwise back to the library's area list."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=str(uuid.UUID(int=0)),
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-E07000105",  # Ashford
        prev_area_slug=prev_area_slug,
    )
    assert normalize_spaces(page.select_one("h1").text) == ("Choose an area of Ashford")
    back_link = page.select_one(".govuk-back-link")
    assert back_link["href"] == url_for(
        expected_back_link_url,
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=str(uuid.UUID(int=0)),
        library_slug="wd21-lad21-ctyua21",
        **expected_back_link_extra_kwargs,
    )
def test_choose_broadcast_sub_area_page_for_county_shows_links_for_districts(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """A county page has a single 'All of …' checkbox plus one *link* per
    district (not checkboxes); district links carry prev_area_slug so their
    back links return to this county."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="ctyua21-E10000016",  # Kent
    )
    assert normalize_spaces(page.select_one("h1").text) == ("Choose an area of Kent")
    live_search = page.select_one("[data-notify-module=live-search]")
    assert live_search["data-targets"] == ".file-list-item"
    assert live_search.select_one("input")["type"] == "search"
    all_choices_checkbox = [
        (
            choice.select_one("input")["value"],
            normalize_spaces(choice.select_one("label").text),
        )
        for choice in page.select("form[method=post] .govuk-checkboxes__item")
    ]
    districts = [
        (
            district["href"],
            district.text,
        )
        for district in page.select("form[method=post] a")
    ]
    # The only checkbox is 'select all'; everything else is a link
    assert all_choices_checkbox == [
        ("y", "All of Kent"),
    ]
    assert len(districts) == 12
    assert districts[0][0] == url_for(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-E07000105",
        prev_area_slug="ctyua21-E10000016",  # Kent
    )
    assert districts[0][1] == "Ashford"
    assert districts[-1][0] == url_for(
        "main.choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-E07000116",
        prev_area_slug="ctyua21-E10000016",  # Kent
    )
    assert districts[-1][1] == "Tunbridge Wells"
def test_add_broadcast_area(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    mock_update_broadcast_message,
    fake_uuid,
    mocker,
    active_user_create_broadcasts_permission,
):
    """Posting new country choices merges them with the areas already on
    the draft and sends the combined area data (with polygons) to the API."""
    service_one["permissions"] += ["broadcast"]
    fake_coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
    polygon_stub = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])(
        as_coordinate_pairs_lat_long=fake_coordinates
    )
    mock_get_polygons_from_areas = mocker.patch(
        "app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas",
        return_value=polygon_stub,
    )
    client_request.login(active_user_create_broadcasts_permission)
    client_request.post(
        ".choose_broadcast_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="ctry19",
        _data={"areas": ["ctry19-E92000001", "ctry19-W92000004"]},
    )
    mock_get_polygons_from_areas.assert_called_once_with(area_attribute="simple_polygons")
    # Scotland was already on the draft, so it is kept alongside the new areas
    mock_update_broadcast_message.assert_called_once_with(
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        data={
            "areas": {
                "ids": ["ctry19-E92000001", "ctry19-S92000003", "ctry19-W92000004"],
                "names": ["England", "Scotland", "Wales"],
                "aggregate_names": ["England", "Scotland", "Wales"],
                "simple_polygons": fake_coordinates,
            }
        },
    )
@pytest.mark.parametrize(
    "post_data, expected_data",
    (
        (
            {"select_all": "y", "areas": ["wd21-S13002845"]},
            {
                # wd21-S13002845 is ignored because the user chose ‘Select all…’
                "ids": ["lad21-S12000033"],
                "names": ["Aberdeen City"],
                "aggregate_names": ["Aberdeen City"],
            },
        ),
        (
            {"areas": ["wd21-S13002845", "wd21-S13002836"]},
            {
                "ids": ["wd21-S13002845", "wd21-S13002836"],
                # NOTE(review): "names" is not in the same order as "ids" here
                # (Bridge of Don appears first) — presumably the app derives
                # names independently of submission order; confirm intended.
                "names": ["Bridge of Don", "Airyhall/Broomhill/Garthdee"],
                "aggregate_names": ["Aberdeen City"],
            },
        ),
    ),
)
def test_add_broadcast_sub_area_district_view(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    mock_update_broadcast_message,
    fake_uuid,
    post_data,
    expected_data,
    mocker,
    active_user_create_broadcasts_permission,
):
    """Posting the ward checkboxes for a district either adds the chosen
    wards, or — if 'select all' was ticked — the whole district, merging
    with the areas already on the draft."""
    service_one["permissions"] += ["broadcast"]
    polygon_class = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])
    coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
    polygons = polygon_class(as_coordinate_pairs_lat_long=coordinates)
    mock_get_polygons_from_areas = mocker.patch(
        "app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas",
        return_value=polygons,
    )
    client_request.login(active_user_create_broadcasts_permission)
    client_request.post(
        ".choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="lad21-S12000033",
        _data=post_data,
    )
    # These two areas are on the broadcast already
    expected_data["ids"] = ["ctry19-E92000001", "ctry19-S92000003"] + expected_data["ids"]
    expected_data["names"] = ["England", "Scotland"] + expected_data["names"]
    expected_data["aggregate_names"] = sorted(["England", "Scotland"] + expected_data["aggregate_names"])
    mock_get_polygons_from_areas.assert_called_once_with(area_attribute="simple_polygons")
    mock_update_broadcast_message.assert_called_once_with(
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        data={
            "areas": {
                "simple_polygons": coordinates,
                **expected_data,
            }
        },
    )
def test_add_broadcast_sub_area_county_view(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    mock_update_broadcast_message,
    fake_uuid,
    mocker,
    active_user_create_broadcasts_permission,
):
    """Ticking 'select all' for a county adds the whole county to the draft
    alert, keeping the areas that were already on it."""
    service_one["permissions"] += ["broadcast"]
    fake_coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
    polygon_stub = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])(
        as_coordinate_pairs_lat_long=fake_coordinates
    )
    mock_get_polygons_from_areas = mocker.patch(
        "app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas",
        return_value=polygon_stub,
    )
    client_request.login(active_user_create_broadcasts_permission)
    client_request.post(
        ".choose_broadcast_sub_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        library_slug="wd21-lad21-ctyua21",
        area_slug="ctyua21-E10000016",  # Kent
        _data={"select_all": "y"},
    )
    mock_get_polygons_from_areas.assert_called_once_with(area_attribute="simple_polygons")
    # England and Scotland were on the broadcast already; Kent is appended
    mock_update_broadcast_message.assert_called_once_with(
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        data={
            "areas": {
                "simple_polygons": fake_coordinates,
                "ids": ["ctry19-E92000001", "ctry19-S92000003", "ctyua21-E10000016"],
                "names": ["England", "Scotland", "Kent"],
                "aggregate_names": ["England", "Kent", "Scotland"],
            }
        },
    )
def test_remove_broadcast_area_page(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    mock_update_broadcast_message,
    fake_uuid,
    mocker,
    active_user_create_broadcasts_permission,
):
    """Removing England redirects back to the areas preview and updates the
    broadcast so only Scotland remains."""
    service_one["permissions"] += ["broadcast"]
    fake_coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
    polygon_stub = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])(
        as_coordinate_pairs_lat_long=fake_coordinates
    )
    mock_get_polygons_from_areas = mocker.patch(
        "app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas",
        return_value=polygon_stub,
    )
    client_request.login(active_user_create_broadcasts_permission)
    client_request.get(
        ".remove_broadcast_area",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        area_slug="ctry19-E92000001",
        _expected_redirect=url_for(
            ".preview_broadcast_areas",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        ),
    )
    mock_get_polygons_from_areas.assert_called_once_with(area_attribute="simple_polygons")
    mock_update_broadcast_message.assert_called_once_with(
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        data={
            "areas": {
                "simple_polygons": fake_coordinates,
                "names": ["Scotland"],
                "aggregate_names": ["Scotland"],
                "ids": ["ctry19-S92000003"],
            },
        },
    )
def test_preview_broadcast_message_page(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """The preview page lists the chosen areas without remove links, renders
    the alert content, and posts back to itself (form has no action)."""
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    page = client_request.get(
        ".preview_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    unremoveable_areas = page.select(".area-list-item.area-list-item--unremoveable")
    assert [normalize_spaces(area.text) for area in unremoveable_areas] == ["England", "Scotland"]
    assert normalize_spaces(page.select_one("h2.broadcast-message-heading").text) == ("Emergency alert")
    assert normalize_spaces(page.select_one(".broadcast-message-wrapper").text) == ("Emergency alert " "This is a test")
    form = page.select_one("form")
    assert form["method"] == "post"
    assert "action" not in form
def test_start_broadcasting(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    mock_update_broadcast_message_status,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """Posting the preview page moves the alert to pending-approval and
    redirects to the current-broadcast view.

    Also removes a stray trailing comma after ``client_request.post(...)``,
    which previously turned the statement into a pointless one-element tuple.
    """
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_create_broadcasts_permission)
    client_request.post(
        ".preview_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _expected_redirect=url_for(
            "main.view_current_broadcast",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        ),
    )
    mock_update_broadcast_message_status.assert_called_once_with(
        "pending-approval",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@pytest.mark.parametrize(
    "endpoint, created_by_api, extra_fields, expected_paragraphs",
    (
        (
            ".view_current_broadcast",
            False,
            {
                "status": "broadcasting",
                "finishes_at": "2020-02-23T23:23:23.000000",
            },
            [
                "Live since 20 February at 8:20pm Stop sending",
                "Created by Alice and approved by Bob.",
                "Broadcasting stops tomorrow at 11:23pm.",
            ],
        ),
        (
            ".view_current_broadcast",
            True,
            {
                "status": "broadcasting",
                "finishes_at": "2020-02-23T23:23:23.000000",
            },
            [
                "Live since 20 February at 8:20pm Stop sending",
                "Created from an API call and approved by Alice.",
                "Broadcasting stops tomorrow at 11:23pm.",
            ],
        ),
        (
            ".view_previous_broadcast",
            False,
            {
                "status": "broadcasting",
                "finishes_at": "2020-02-22T22:20:20.000000",  # 2 mins before now()
            },
            [
                "Sent on 20 February at 8:20pm.",
                "Created by Alice and approved by Bob.",
                "Finished broadcasting today at 10:20pm.",
            ],
        ),
        (
            ".view_previous_broadcast",
            True,
            {
                "status": "broadcasting",
                "finishes_at": "2020-02-22T22:20:20.000000",  # 2 mins before now()
            },
            [
                "Sent on 20 February at 8:20pm.",
                "Created from an API call and approved by Alice.",
                "Finished broadcasting today at 10:20pm.",
            ],
        ),
        (
            ".view_previous_broadcast",
            False,
            {
                "status": "completed",
                "finishes_at": "2020-02-21T21:21:21.000000",
            },
            [
                "Sent on 20 February at 8:20pm.",
                "Created by Alice and approved by Bob.",
                "Finished broadcasting yesterday at 9:21pm.",
            ],
        ),
        (
            ".view_previous_broadcast",
            False,
            {
                "status": "cancelled",
                "cancelled_by_id": sample_uuid,
                "cancelled_at": "2020-02-21T21:21:21.000000",
            },
            [
                "Sent on 20 February at 8:20pm.",
                "Created by Alice and approved by Bob.",
                "Stopped by Carol yesterday at 9:21pm.",
            ],
        ),
        (
            ".view_previous_broadcast",
            False,
            {
                "status": "cancelled",
                "cancelled_by_id": None,
                "cancelled_at": "2020-02-21T21:21:21.000000",
            },
            [
                "Sent on 20 February at 8:20pm.",
                "Created by Alice and approved by Bob.",
                "Stopped by an API call yesterday at 9:21pm.",
            ],
        ),
        (
            ".view_rejected_broadcast",
            False,
            {
                "status": "rejected",
                "updated_at": "2020-02-21T21:21:21.000000",
            },
            [
                "Rejected yesterday at 9:21pm.",
                "Created by Alice and approved by Bob.",
            ],
        ),
    ),
)
@freeze_time("2020-02-22T22:22:22.000000")
def test_view_broadcast_message_page(
    mocker,
    client_request,
    service_one,
    active_user_view_permissions,
    fake_uuid,
    endpoint,
    created_by_api,
    extra_fields,
    expected_paragraphs,
):
    """The metadata paragraphs shown for an alert reflect its lifecycle
    state (broadcasting / completed / cancelled / rejected), who created,
    approved, and stopped it, and relative dates against the frozen clock.
    """
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=None if created_by_api else fake_uuid,
            approved_by_id=fake_uuid,
            starts_at="2020-02-20T20:20:20.000000",
            **extra_fields,
        ),
    )
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_view_permissions)
    # Successive user lookups resolve to the creator, approver, canceller
    mocker.patch(
        "app.user_api_client.get_user",
        side_effect=[
            user_json(name="Alice"),
            user_json(name="Bob"),
            user_json(name="Carol"),
        ],
    )
    page = client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert [normalize_spaces(p.text) for p in page.select("main p.govuk-body")] == expected_paragraphs
@pytest.mark.parametrize(
    "endpoint",
    (
        ".view_current_broadcast",
        ".view_previous_broadcast",
        ".view_rejected_broadcast",
    ),
)
@pytest.mark.parametrize(
    "status, expected_highlighted_navigation_item, expected_back_link_endpoint",
    (
        (
            "pending-approval",
            "Current alerts",
            ".broadcast_dashboard",
        ),
        (
            "broadcasting",
            "Current alerts",
            ".broadcast_dashboard",
        ),
        (
            "completed",
            "Past alerts",
            ".broadcast_dashboard_previous",
        ),
        (
            "cancelled",
            "Past alerts",
            ".broadcast_dashboard_previous",
        ),
        (
            "rejected",
            "Rejected alerts",
            ".broadcast_dashboard_rejected",
        ),
    ),
)
@freeze_time("2020-02-22T22:22:22.000000")
def test_view_broadcast_message_shows_correct_highlighted_navigation(
    mocker,
    client_request,
    service_one,
    active_user_approve_broadcasts_permission,
    fake_uuid,
    endpoint,
    status,
    expected_highlighted_navigation_item,
    expected_back_link_endpoint,
):
    """Whichever view endpoint is hit, the selected side-navigation item and
    the back link are driven by the alert's *status* (redirects followed, so
    a mismatched endpoint/status still lands on the right page)."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            approved_by_id=fake_uuid,
            starts_at="2020-02-20T20:20:20.000000",
            finishes_at="2021-12-21T21:21:21.000000",
            cancelled_at="2021-01-01T01:01:01.000000",
            updated_at="2021-01-01T01:01:01.000000",
            status=status,
        ),
    )
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_approve_broadcasts_permission)
    page = client_request.get(
        endpoint, service_id=SERVICE_ONE_ID, broadcast_message_id=fake_uuid, _follow_redirects=True
    )
    assert normalize_spaces(page.select_one(".navigation .selected").text) == (expected_highlighted_navigation_item)
    assert page.select_one(".govuk-back-link")["href"] == url_for(
        expected_back_link_endpoint,
        service_id=SERVICE_ONE_ID,
    )
def test_view_pending_broadcast(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_approve_broadcasts_permission,
):
    """An approver viewing someone else's pending alert sees the approval
    banner naming the creator, a start button, a reject link, and no
    confirm checkbox."""
    creator = create_active_user_create_broadcasts_permissions(with_unique_id=True)
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=creator["id"],
            finishes_at=None,
            status="pending-approval",
        ),
    )
    client_request.login(active_user_approve_broadcasts_permission)
    # Looking up the creating user resolves to our stubbed creator
    mocker.patch(
        "app.user_api_client.get_user",
        return_value=creator,
    )
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert normalize_spaces(page.select_one(".banner").text) == (
        "Test User Create Broadcasts Permission wants to broadcast Example template "
        "No phones will get this alert. "
        "Start broadcasting now Reject this alert"
    )
    assert not page.select(".banner input[type=checkbox]")
    form = page.select_one("form.banner")
    assert form["method"] == "post"
    assert "action" not in form
    assert form.select_one("button")
    reject_link = form.select_one("a.govuk-link.govuk-link--destructive")
    assert reject_link.text == "Reject this alert"
    assert reject_link["href"] == url_for(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@pytest.mark.parametrize(
    "extra_broadcast_json_fields, expected_banner_text",
    (
        (
            {"reference": "ABC123"},
            (
                "Test User Create Broadcasts Permission wants to broadcast ABC123 "
                "No phones will get this alert. "
                "Start broadcasting now Reject this alert"
            ),
        ),
        (
            # When a CAP event is present it is used as the display name
            # instead of the reference
            {"cap_event": "Severe flood warning", "reference": "ABC123"},
            (
                "Test User Create Broadcasts Permission wants to broadcast Severe flood warning "
                "No phones will get this alert. "
                "Start broadcasting now Reject this alert"
            ),
        ),
    ),
)
def test_view_pending_broadcast_without_template(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_create_broadcasts_permission,
    extra_broadcast_json_fields,
    expected_banner_text,
):
    """Template-less alerts are named in the banner by their reference (or
    CAP event), and their raw content is rendered in the message body."""
    broadcast_creator = create_active_user_create_broadcasts_permissions(with_unique_id=True)
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=None,
            created_by_id=broadcast_creator["id"],
            finishes_at=None,
            status="pending-approval",
            content="Uh-oh",
            **extra_broadcast_json_fields,
        ),
    )
    client_request.login(active_user_create_broadcasts_permission)
    mocker.patch(
        "app.user_api_client.get_user",
        return_value=broadcast_creator,
    )
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert (normalize_spaces(page.select_one(".banner").text)) == expected_banner_text
    assert (normalize_spaces(page.select_one(".broadcast-message-wrapper").text)) == ("Emergency alert " "Uh-oh")
def test_view_pending_broadcast_from_api_call(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_approve_broadcasts_permission,
):
    """An alert with no creating user (made via the API) is attributed to
    'An API call' in the approval banner, named by its reference."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=None,
            created_by_id=None,  # No user created this broadcast
            finishes_at=None,
            status="pending-approval",
            reference="abc123",
            content="Uh-oh",
        ),
    )
    service_one["permissions"] += ["broadcast"]
    client_request.login(active_user_approve_broadcasts_permission)
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    banner_text = normalize_spaces(page.select_one(".banner").text)
    message_text = normalize_spaces(page.select_one(".broadcast-message-wrapper").text)
    assert banner_text == (
        "An API call wants to broadcast abc123 "
        "No phones will get this alert. "
        "Start broadcasting now Reject this alert"
    )
    assert message_text == ("Emergency alert " "Uh-oh")
@pytest.mark.parametrize(
    "channel, expected_label_text",
    (
        ("test", ("I understand this will alert anyone who has switched on the test channel")),
        ("operator", ("I understand this will alert anyone who has switched on the operator channel")),
        ("severe", ("I understand this will alert millions of people")),
        ("government", ("I understand this will alert millions of people, even if they’ve opted out")),
    ),
)
def test_checkbox_to_confirm_non_training_broadcasts(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_approve_broadcasts_permission,
    channel,
    expected_label_text,
):
    """On a live (non-training) service, the approval banner includes a
    'confirm' checkbox whose label wording depends on the broadcast channel."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=None,
            created_by_id=None,
            status="pending-approval",
        ),
    )
    service_one["permissions"] += ["broadcast"]
    # A live service: not restricted, with a real provider and channel
    service_one["restricted"] = False
    service_one["allowed_broadcast_provider"] = "all"
    service_one["broadcast_channel"] = channel
    client_request.login(active_user_approve_broadcasts_permission)
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    label = page.select_one("form.banner label")
    assert label["for"] == "confirm"
    assert (normalize_spaces(page.select_one("form.banner label").text)) == expected_label_text
    assert page.select_one("form.banner input[type=checkbox]")["name"] == "confirm"
    assert page.select_one("form.banner input[type=checkbox]")["value"] == "y"
def test_confirm_approve_non_training_broadcasts_errors_if_not_ticked(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    mock_update_broadcast_message,
    mock_update_broadcast_message_status,
    active_user_approve_broadcasts_permission,
):
    """Approving a live-channel alert without ticking the confirm checkbox
    re-renders the page with an error and makes no API update calls."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=None,
            created_by_id=None,
            status="pending-approval",
        ),
    )
    service_one["permissions"] += ["broadcast"]
    service_one["restricted"] = False
    service_one["allowed_broadcast_provider"] = "all"
    service_one["broadcast_channel"] = "severe"
    client_request.login(active_user_approve_broadcasts_permission)
    page = client_request.post(
        ".approve_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _data={},  # checkbox not ticked
        _expected_status=200,
    )
    error_message = page.select_one("form.banner .govuk-error-message")
    assert error_message["id"] == "confirm-error"
    assert normalize_spaces(error_message.text) == ("Error: You need to confirm that you understand")
    assert not mock_update_broadcast_message.called
    assert not mock_update_broadcast_message_status.called
@freeze_time("2020-02-22T22:22:22.000000")
def test_can_approve_own_broadcast_in_training_mode(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_approve_broadcasts_permission,
):
    """In training mode a user may approve their own alert: the banner
    explains live-service rules but offers a collapsible 'Approve your own
    alert' section with a start button and a reject link."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    client_request.login(active_user_approve_broadcasts_permission)
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert (normalize_spaces(page.select_one(".banner h1").text)) == ("Example template is waiting for approval")
    assert (normalize_spaces(page.select_one(".banner p").text)) == (
        "When you use a live account you’ll need another member of " "your team to approve your alert."
    )
    assert (normalize_spaces(page.select_one(".banner details summary").text)) == ("Approve your own alert")
    assert (normalize_spaces(page.select_one(".banner details ").text)) == (
        "Approve your own alert "
        "Because you’re in training mode you can approve your own "
        "alerts, to see how it works. "
        "No real alerts will be broadcast to anyone’s phone. "
        "Start broadcasting now "
        "Reject this alert"
    )
    # The approve form posts back to the same page (no action attribute)
    form = page.select_one(".banner details form")
    assert form["method"] == "post"
    assert "action" not in form
    assert normalize_spaces(form.select_one("button").text) == ("Start broadcasting now")
    link = page.select_one(".banner a.govuk-link.govuk-link--destructive")
    assert link.text == "Reject this alert"
    assert link["href"] == url_for(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@freeze_time("2020-02-22T22:22:22.000000")
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_approve_broadcasts_permissions(),
        create_active_user_create_broadcasts_permissions(),
    ],
)
def test_cant_approve_own_broadcast_if_service_is_live(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    user,
):
    """On a live service the creator cannot approve their own alert — no
    approve form is rendered, only a 'Discard this alert' link — regardless
    of whether they also hold approve permission."""
    service_one["restricted"] = False  # i.e. the service is live
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    client_request.login(user)
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert (normalize_spaces(page.select_one(".banner h1").text)) == ("Example template is waiting for approval")
    assert (normalize_spaces(page.select_one(".banner p").text)) == (
        "You need another member of your team to approve your alert."
    )
    assert not page.select("form")
    link = page.select_one(".banner a.govuk-link.govuk-link--destructive")
    assert link.text == "Discard this alert"
    assert link["href"] == url_for(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@freeze_time("2020-02-22T22:22:22.000000")
@pytest.mark.parametrize("user_is_platform_admin", [True, False])
def test_view_only_user_cant_approve_broadcast_created_by_someone_else(
    mocker,
    client_request,
    service_one,
    active_user_create_broadcasts_permission,
    active_user_view_permissions,
    platform_admin_user_no_service_permissions,
    fake_uuid,
    user_is_platform_admin,
):
    """A user without approve permission sees only the waiting-for-approval
    banner, with no form and no links.

    NOTE(review): the ``user_is_platform_admin`` parameter and the three
    user fixtures are never used, and there is no ``client_request.login``
    call — both parametrized runs exercise the same default logged-in user.
    Presumably a login switching on ``user_is_platform_admin`` was intended;
    confirm and fix separately.
    """
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    service_one["permissions"] += ["broadcast"]
    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert (normalize_spaces(page.select_one(".banner").text)) == (
        "This alert is waiting for approval You don’t have permission to approve alerts."
    )
    assert not page.select_one("form")
    assert not page.select_one(".banner a")
def test_view_only_user_cant_approve_broadcasts_they_created(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_create_broadcasts_permission,
    active_user_view_permissions,
):
    """A view-only user sees the pending banner but no approve/reject controls.

    The alert was created by someone else (``created_by_id=fake_uuid``), and the
    logged-in user only has view permissions, so no form or link is rendered.
    """
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    client_request.login(active_user_view_permissions)
    service_one["permissions"] += ["broadcast"]
    # Bug fix: the key was previously misspelt "restriced", which silently set
    # a junk key and left the service's real trial-mode flag untouched.
    service_one["restricted"] = False

    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    assert (normalize_spaces(page.select_one(".banner").text)) == (
        "This alert is waiting for approval You don’t have permission to approve alerts."
    )
    assert not page.select_one("form")
    assert not page.select_one(".banner a")
@pytest.mark.parametrize(
    "is_service_training_mode,banner_text",
    [
        (
            True,
            (
                "This alert is waiting for approval "
                "Another member of your team needs to approve this alert. "
                "This service is in training mode. No real alerts will be sent. "
                "Reject this alert"
            ),
        ),
        (
            False,
            (
                "This alert is waiting for approval "
                "Another member of your team needs to approve this alert. "
                "Reject this alert"
            ),
        ),
    ],
)
def test_user_without_approve_permission_cant_approve_broadcast_created_by_someone_else(
    mocker,
    client_request,
    service_one,
    active_user_create_broadcasts_permission,
    fake_uuid,
    is_service_training_mode,
    banner_text,
):
    """A create-permission user may reject, but not approve, someone else's alert."""
    # Log in with a unique user id so the alert (created_by_id=fake_uuid)
    # reads as created by a different user.
    current_user = create_active_user_create_broadcasts_permissions(with_unique_id=True)
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    client_request.login(current_user)
    # Presumably the page also looks up the alert's creator — this patch makes
    # that lookup return a user without approve permission. TODO confirm.
    mocker.patch(
        "app.user_api_client.get_user",
        return_value=active_user_create_broadcasts_permission,
    )
    service_one["permissions"] += ["broadcast"]
    service_one["restricted"] = is_service_training_mode

    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    assert (normalize_spaces(page.select_one(".banner").text)) == banner_text
    assert not page.select_one("form")

    link = page.select_one(".banner a")
    assert link["href"] == url_for(
        ".reject_broadcast_message", service_id=SERVICE_ONE_ID, broadcast_message_id=fake_uuid
    )
def test_user_without_approve_permission_cant_approve_broadcast_they_created(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    active_user_create_broadcasts_permission,
):
    """The creator (without approve permission) can only discard their own pending alert."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            # The alert was created by the logged-in user themselves.
            created_by_id=active_user_create_broadcasts_permission["id"],
            finishes_at=None,
            status="pending-approval",
        ),
    )
    client_request.login(active_user_create_broadcasts_permission)
    service_one["permissions"] += ["broadcast"]

    page = client_request.get(
        ".view_current_broadcast",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )

    # The banner mentions training mode: the service's restricted flag is not
    # overridden here, so the fixture default is presumably trial mode — confirm.
    assert (normalize_spaces(page.select_one(".banner").text)) == (
        "Example template is waiting for approval "
        "You need another member of your team to approve this alert. "
        "This service is in training mode. No real alerts will be sent. "
        "Discard this alert"
    )
    assert not page.select(".banner input[type=checkbox]")

    link = page.select_one("a.govuk-link.govuk-link--destructive")
    assert link.text == "Discard this alert"
    assert link["href"] == url_for(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@pytest.mark.parametrize(
    "channel, expected_finishes_at",
    (
        # 4 hours later
        ("operator", "2020-02-23T02:22:22"),
        ("test", "2020-02-23T02:22:22"),
        # 22 hours 30 minutes later
        ("severe", "2020-02-23T20:52:22"),
        ("government", "2020-02-23T20:52:22"),
        (None, "2020-02-23T20:52:22"),  # Training mode
    ),
)
@pytest.mark.parametrize(
    "trial_mode, initial_status, post_data, expected_approval, expected_redirect",
    (
        (
            True,
            "draft",
            {},
            False,
            partial(
                url_for,
                ".view_current_broadcast",
                broadcast_message_id=sample_uuid,
            ),
        ),
        (
            True,
            "pending-approval",
            {},
            True,
            partial(
                url_for,
                ".broadcast_tour",
                step_index=6,
            ),
        ),
        (
            False,
            "pending-approval",
            {"confirm": "y"},
            True,
            partial(
                url_for,
                ".view_current_broadcast",
                broadcast_message_id=sample_uuid,
            ),
        ),
        (
            True,
            "rejected",
            {},
            False,
            partial(
                url_for,
                ".view_current_broadcast",
                broadcast_message_id=sample_uuid,
            ),
        ),
        (
            True,
            "broadcasting",
            {},
            False,
            partial(
                url_for,
                ".view_current_broadcast",
                broadcast_message_id=sample_uuid,
            ),
        ),
        (
            True,
            "cancelled",
            {},
            False,
            partial(
                url_for,
                ".view_current_broadcast",
                broadcast_message_id=sample_uuid,
            ),
        ),
    ),
)
@freeze_time("2020-02-22T22:22:22.000000")
def test_confirm_approve_broadcast(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    mock_update_broadcast_message,
    mock_update_broadcast_message_status,
    active_user_approve_broadcasts_permission,
    initial_status,
    post_data,
    expected_approval,
    trial_mode,
    expected_redirect,
    channel,
    expected_finishes_at,
):
    """Approving only happens from 'pending-approval'; any other status just redirects.

    On approval the message gets channel-dependent start/finish times
    (see the first parametrize above) and its status set to "broadcasting".
    In trial mode a successful approval redirects into the broadcast tour;
    live services require a ``confirm`` field in the POST data.
    """
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status=initial_status,
        ),
    )
    service_one["restricted"] = trial_mode
    service_one["permissions"] += ["broadcast"]
    service_one["broadcast_channel"] = channel
    client_request.login(active_user_approve_broadcasts_permission)

    client_request.post(
        ".approve_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _expected_redirect=expected_redirect(
            service_id=SERVICE_ONE_ID,
        ),
        _data=post_data,
    )

    if expected_approval:
        # starts_at is "now" (frozen time); finishes_at depends on the channel.
        mock_update_broadcast_message.assert_called_once_with(
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
            data={
                "starts_at": "2020-02-22T22:22:22",
                "finishes_at": expected_finishes_at,
            },
        )
        mock_update_broadcast_message_status.assert_called_once_with(
            "broadcasting",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        )
    else:
        assert mock_update_broadcast_message.called is False
        assert mock_update_broadcast_message_status.called is False
@pytest.mark.parametrize(
    "user",
    (
        create_active_user_create_broadcasts_permissions(),
        create_active_user_approve_broadcasts_permissions(),
    ),
)
@freeze_time("2020-02-22T22:22:22.000000")
def test_reject_broadcast(
    mocker,
    client_request,
    service_one,
    fake_uuid,
    mock_update_broadcast_message,
    mock_update_broadcast_message_status,
    user,
):
    """Either broadcast permission (create or approve) is enough to reject a pending alert."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status="pending-approval",
        ),
    )
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)

    client_request.get(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _expected_redirect=url_for(
            ".broadcast_dashboard",
            service_id=SERVICE_ONE_ID,
        ),
    )

    # Rejection only changes status; the message content/timings are untouched.
    assert mock_update_broadcast_message.called is False
    mock_update_broadcast_message_status.assert_called_once_with(
        "rejected",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@pytest.mark.parametrize(
    "user",
    [
        create_active_user_create_broadcasts_permissions(),
        create_active_user_approve_broadcasts_permissions(),
    ],
)
@pytest.mark.parametrize(
    "initial_status",
    (
        "draft",
        "rejected",
        "broadcasting",
        "cancelled",
    ),
)
@freeze_time("2020-02-22T22:22:22.000000")
def test_cant_reject_broadcast_in_wrong_state(
    mocker,
    client_request,
    service_one,
    mock_get_broadcast_template,
    fake_uuid,
    mock_update_broadcast_message,
    mock_update_broadcast_message_status,
    user,
    initial_status,
):
    """Rejecting an alert that is not pending-approval redirects and changes nothing."""
    mocker.patch(
        "app.broadcast_message_api_client.get_broadcast_message",
        return_value=broadcast_message_json(
            id_=fake_uuid,
            service_id=SERVICE_ONE_ID,
            template_id=fake_uuid,
            created_by_id=fake_uuid,
            finishes_at="2020-02-23T23:23:23.000000",
            status=initial_status,
        ),
    )
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)

    client_request.get(
        ".reject_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _expected_redirect=url_for(
            ".view_current_broadcast",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        ),
    )

    assert mock_update_broadcast_message.called is False
    assert mock_update_broadcast_message_status.called is False
@pytest.mark.parametrize(
    "endpoint",
    (
        ".view_current_broadcast",
        ".view_previous_broadcast",
    ),
)
def test_no_view_page_for_draft(
    client_request,
    service_one,
    mock_get_draft_broadcast_message,
    fake_uuid,
    endpoint,
):
    """Draft alerts have no current/previous view page — expect a 404."""
    service_one["permissions"] += ["broadcast"]
    client_request.get(
        endpoint,
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        _expected_status=404,
    )
@pytest.mark.parametrize(
    "user",
    (
        create_active_user_create_broadcasts_permissions(),
        create_active_user_approve_broadcasts_permissions(),
        create_platform_admin_user(),
    ),
)
def test_cancel_broadcast(
    client_request,
    service_one,
    mock_get_live_broadcast_message,
    mock_update_broadcast_message_status,
    fake_uuid,
    user,
):
    """
    Users with either broadcast permission, and platform admins, see the
    stop-broadcasting confirmation page; merely viewing it must not change status.
    """
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)
    page = client_request.get(
        ".cancel_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
    assert normalize_spaces(page.select_one(".banner-dangerous").text) == (
        "Are you sure you want to stop this broadcast now? " "Yes, stop broadcasting"
    )
    form = page.select_one("form")
    assert form["method"] == "post"
    # No "action" attribute: the form posts back to the current URL.
    assert "action" not in form
    assert normalize_spaces(form.select_one("button").text) == ("Yes, stop broadcasting")
    assert mock_update_broadcast_message_status.called is False
    # The cancel URL should not appear as a plain link anywhere on the page.
    assert (
        url_for(
            ".cancel_broadcast_message",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        )
        not in page
    )
@pytest.mark.parametrize(
    "user",
    [
        create_platform_admin_user(),
        create_active_user_create_broadcasts_permissions(),
        create_active_user_approve_broadcasts_permissions(),
    ],
)
def test_confirm_cancel_broadcast(
    client_request,
    service_one,
    mock_get_live_broadcast_message,
    mock_update_broadcast_message_status,
    fake_uuid,
    user,
):
    """
    Platform admins and users with any of the broadcast permissions can cancel broadcasts.
    """
    service_one["permissions"] += ["broadcast"]
    client_request.login(user)
    client_request.post(
        ".cancel_broadcast_message",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
        # A cancelled broadcast is viewed on the "previous" page afterwards.
        _expected_redirect=url_for(
            ".view_previous_broadcast",
            service_id=SERVICE_ONE_ID,
            broadcast_message_id=fake_uuid,
        ),
    )
    mock_update_broadcast_message_status.assert_called_once_with(
        "cancelled",
        service_id=SERVICE_ONE_ID,
        broadcast_message_id=fake_uuid,
    )
@pytest.mark.parametrize("method", ("post", "get"))
def test_cant_cancel_broadcast_in_a_different_state(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message_status,
fake_uuid,
active_user_create_broadcasts_permission,
method,
):
service_one["permissions"] += ["broadcast"]
client_request.login(active_user_create_broadcasts_permission)
getattr(client_request, method)(
".cancel_broadcast_message",
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
".view_current_broadcast",
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
),
)
assert mock_update_broadcast_message_status.called is False
| mit | f0f6dca940ecf3a2c929ade98456c27c | 29.673216 | 120 | 0.57956 | 3.743028 | false | false | false | false |
alphagov/notifications-admin | app/main/validators.py | 1 | 7088 | import re
from abc import ABC, abstractmethod
from flask import current_app
from notifications_utils.field import Field
from notifications_utils.formatters import formatted_list
from notifications_utils.recipients import InvalidEmailError, validate_email_address
from notifications_utils.sanitise_text import SanitiseSMS
from notifications_utils.template import BroadcastMessageTemplate
from orderedset import OrderedSet
from wtforms import ValidationError
from wtforms.validators import StopValidation
from app import antivirus_client
from app.main._commonly_used_passwords import commonly_used_passwords
from app.models.spreadsheet import Spreadsheet
from app.utils.user import is_gov_user
class CommonlyUsedPassword:
    """WTForms validator: reject passwords found in the commonly-used list."""

    def __init__(self, message=None):
        # Fall back to the stock message when no (truthy) message is supplied,
        # matching the original truthiness check.
        self.message = message or "Password is in list of commonly used passwords."

    def __call__(self, form, field):
        if field.data not in commonly_used_passwords:
            return
        raise ValidationError(self.message)
class CsvFileValidator:
    """WTForms validator: the uploaded file must have a spreadsheet extension."""

    def __init__(self, message="Not a csv file"):
        self.message = message

    def __call__(self, form, field):
        filename = field.data.filename
        if Spreadsheet.can_handle(filename):
            return
        raise ValidationError(f"{filename} is not a spreadsheet that Notify can read")
class ValidGovEmail:
    """WTForms validator: the address must belong to a public-sector domain.

    Empty values are skipped so a separate required-field validator can report
    them.
    """

    def __call__(self, form, field):
        if field.data == "":
            return

        # Local import — presumably to avoid import-time coupling to Flask's
        # request machinery; url_for still needs an app context when called.
        from flask import url_for

        # The message is rendered as HTML (contains a govuk-styled link).
        message = """
            Enter a public sector email address or
            <a class="govuk-link govuk-link--no-visited-state" href="{}">find out who can use Notify</a>
        """.format(
            url_for("main.who_can_use_notify")
        )
        if not is_gov_user(field.data.lower()):
            raise ValidationError(message)
class ValidEmail:
    """WTForms validator wrapping notifications-utils email-address validation."""

    message = "Enter a valid email address"

    def __call__(self, form, field):
        # Empty/missing values are left to other validators (e.g. DataRequired).
        if field.data:
            try:
                validate_email_address(field.data)
            except InvalidEmailError:
                raise ValidationError(self.message)
class NoCommasInPlaceHolders:
    """WTForms validator: ((placeholders)) in the content must not contain commas."""

    def __init__(self, message="You cannot put commas between double brackets"):
        self.message = message

    def __call__(self, form, field):
        placeholders = Field(field.data).placeholders
        if any("," in placeholder for placeholder in placeholders):
            raise ValidationError(self.message)
class NoElementInSVG(ABC):
    """Base validator: reject an uploaded SVG containing a given element.

    Subclasses provide ``element`` (the tag name to look for) and ``message``
    (the error text to raise).
    """

    @property
    @abstractmethod
    def element(self):
        pass

    @property
    @abstractmethod
    def message(self):
        pass

    def __call__(self, form, field):
        stream = field.data.stream
        contents = stream.read().decode("utf-8")
        # Rewind so later consumers can re-read the upload from the start.
        stream.seek(0)
        opening_tag = f"<{self.element}"
        if opening_tag in contents.lower():
            raise ValidationError(self.message)
class NoEmbeddedImagesInSVG(NoElementInSVG):
    # Matches any "<image" opening tag in the uploaded SVG.
    element = "image"
    message = "This SVG has an embedded raster image in it and will not render well"
class NoTextInSVG(NoElementInSVG):
    # Matches any "<text" opening tag in the uploaded SVG.
    element = "text"
    message = "This SVG has text which has not been converted to paths and may not render well"
class OnlySMSCharacters:
    """WTForms validator: the content must only use SMS-compatible characters.

    ``template_type`` ("broadcast" or "sms") only changes the wording of the
    error message.
    """

    def __init__(self, *args, template_type, **kwargs):
        self._template_type = template_type
        super().__init__(*args, **kwargs)

    def __call__(self, form, field):
        non_sms_characters = sorted(list(SanitiseSMS.get_non_compatible_characters(field.data)))
        if non_sms_characters:
            # e.g. "You cannot use Ē or ī in text messages. They will not show
            # up properly on everyone’s phones."
            raise ValidationError(
                "You cannot use {} in {}. {} will not show up properly on everyone’s phones.".format(
                    formatted_list(non_sms_characters, conjunction="or", before_each="", after_each=""),
                    {
                        "broadcast": "broadcasts",
                        "sms": "text messages",
                    }.get(self._template_type),
                    ("It" if len(non_sms_characters) == 1 else "They"),
                )
            )
class NoPlaceholders:
    """WTForms validator: the content must not contain any ((placeholders))."""

    _DEFAULT_MESSAGE = "You can’t use ((double brackets)) to personalise this message"

    def __init__(self, message=None):
        self.message = message if message else self._DEFAULT_MESSAGE

    def __call__(self, form, field):
        if Field(field.data).placeholders:
            raise ValidationError(self.message)
class BroadcastLength:
    """WTForms validator: broadcast content must fit within the character limit."""

    def __call__(self, form, field):
        template = BroadcastMessageTemplate(
            {
                "template_type": "broadcast",
                "content": field.data,
            }
        )
        if template.content_too_long:
            non_gsm_characters = list(sorted(template.non_gsm_characters))
            if non_gsm_characters:
                # Name the offending characters — the limit presumably depends
                # on whether the content fits the GSM character set (confirm in
                # notifications-utils' BroadcastMessageTemplate).
                raise ValidationError(
                    f"Content must be {template.max_content_count:,.0f} "
                    f"characters or fewer because it contains "
                    f'{formatted_list(non_gsm_characters, conjunction="and", before_each="", after_each="")}'
                )
            raise ValidationError(f"Content must be {template.max_content_count:,.0f} " f"characters or fewer")
class LettersNumbersSingleQuotesFullStopsAndUnderscoresOnly:
    """Allow only letters, digits, whitespace, full stops, underscores and single quotes."""

    regex = re.compile(r"^[a-zA-Z0-9\s\._']+$")

    def __init__(self, message="Use letters and numbers only"):
        self.message = message

    def __call__(self, form, field):
        value = field.data
        # Empty values are left to other validators.
        if not value:
            return
        if self.regex.match(value) is None:
            raise ValidationError(self.message)
class DoesNotStartWithDoubleZero:
    """WTForms validator: the value must not begin with "00"."""

    def __init__(self, message="Cannot start with 00"):
        self.message = message

    def __call__(self, form, field):
        value = field.data
        if not value:
            return
        if value.startswith("00"):
            raise ValidationError(self.message)
class MustContainAlphanumericCharacters:
    """WTForms validator: the value needs at least two alphanumeric characters."""

    regex = re.compile(r".*[a-zA-Z0-9].*[a-zA-Z0-9].*")

    def __init__(self, message="Must include at least two alphanumeric characters"):
        self.message = message

    def __call__(self, form, field):
        value = field.data
        # Empty values are left to other validators.
        if not value:
            return
        if self.regex.match(value) is None:
            raise ValidationError(self.message)
class CharactersNotAllowed:
    """WTForms validator: reject values containing any of a configured set of characters."""

    def __init__(self, characters_not_allowed, *, message=None):
        # OrderedSet rather than set — presumably so the error message lists
        # characters in a stable, caller-defined order; confirm.
        self.characters_not_allowed = OrderedSet(characters_not_allowed)
        self.message = message

    def __call__(self, form, field):
        illegal_characters = self.characters_not_allowed.intersection(field.data)

        if illegal_characters:
            # A custom message, when given, replaces the generated listing.
            if self.message:
                raise ValidationError(self.message)
            raise ValidationError(
                f"Cannot contain "
                f'{formatted_list(illegal_characters, conjunction="or", before_each="", after_each="")}'
            )
class FileIsVirusFree:
    """WTForms validator: scan the uploaded file with the antivirus service."""

    def __call__(self, form, field):
        if field.data:
            # Scanning can be switched off entirely (e.g. locally) via config.
            if current_app.config["ANTIVIRUS_ENABLED"]:
                try:
                    virus_free = antivirus_client.scan(field.data)

                    if not virus_free:
                        # StopValidation halts the rest of this field's
                        # validation chain, not just this validator.
                        raise StopValidation("Your file contains a virus")
                finally:
                    # Always rewind the stream so later consumers can read the
                    # file from the start.
                    field.data.seek(0)
| mit | efc4085fcb9a9228eee7ea26ac6f8db7 | 31.495413 | 111 | 0.619848 | 4.196682 | false | false | false | false |
alphagov/notifications-admin | tests/app/notify_client/test_letter_branding_client.py | 1 | 2675 | from unittest.mock import call
from app.notify_client.letter_branding_client import LetterBrandingClient
def test_get_letter_branding(mocker, fake_uuid):
    """A cache miss fetches from the API and stores the JSON in redis for a week."""
    mock_get = mocker.patch(
        "app.notify_client.letter_branding_client.LetterBrandingClient.get", return_value={"foo": "bar"}
    )
    mock_redis_get = mocker.patch("app.extensions.RedisClient.get", return_value=None)
    mock_redis_set = mocker.patch("app.extensions.RedisClient.set")

    LetterBrandingClient().get_letter_branding(fake_uuid)

    mock_get.assert_called_once_with(url="/letter-branding/{}".format(fake_uuid))
    mock_redis_get.assert_called_once_with("letter_branding-{}".format(fake_uuid))
    mock_redis_set.assert_called_once_with(
        "letter_branding-{}".format(fake_uuid),
        '{"foo": "bar"}',
        ex=604800,  # 7 days, in seconds
    )
def test_get_all_letter_branding(mocker):
    """Listing all brandings caches under a single key, also for a week."""
    mock_get = mocker.patch("app.notify_client.letter_branding_client.LetterBrandingClient.get", return_value=[1, 2, 3])
    mock_redis_get = mocker.patch("app.extensions.RedisClient.get", return_value=None)
    mock_redis_set = mocker.patch("app.extensions.RedisClient.set")

    LetterBrandingClient().get_all_letter_branding()

    mock_get.assert_called_once_with(url="/letter-branding")
    mock_redis_get.assert_called_once_with("letter_branding")
    mock_redis_set.assert_called_once_with(
        "letter_branding",
        "[1, 2, 3]",
        ex=604800,  # 7 days, in seconds
    )
def test_create_letter_branding(mocker):
    """Creating a branding posts to the API and busts the list cache key."""
    new_branding = {"filename": "uuid-test", "name": "my letters"}

    mock_post = mocker.patch("app.notify_client.letter_branding_client.LetterBrandingClient.post")
    mock_redis_delete = mocker.patch("app.extensions.RedisClient.delete")

    LetterBrandingClient().create_letter_branding(
        filename=new_branding["filename"],
        name=new_branding["name"],
    )

    mock_post.assert_called_once_with(url="/letter-branding", data=new_branding)
    mock_redis_delete.assert_called_once_with("letter_branding")
def test_update_letter_branding(mocker, fake_uuid):
    """Updating a branding posts to the API and busts both cache keys (item and list)."""
    branding = {"filename": "uuid-test", "name": "my letters"}

    mock_post = mocker.patch("app.notify_client.letter_branding_client.LetterBrandingClient.post")
    mock_redis_delete = mocker.patch("app.extensions.RedisClient.delete")

    LetterBrandingClient().update_letter_branding(
        branding_id=fake_uuid, filename=branding["filename"], name=branding["name"]
    )

    mock_post.assert_called_once_with(url="/letter-branding/{}".format(fake_uuid), data=branding)
    assert mock_redis_delete.call_args_list == [
        call("letter_branding-{}".format(fake_uuid)),
        call("letter_branding"),
    ]
| mit | 3b61d01f259fb776914e7e444cd4c7cd | 38.338235 | 120 | 0.697196 | 3.262195 | false | true | false | false |
alphagov/notifications-admin | tests/app/test_navigation.py | 1 | 20962 | import pytest
from flask import Flask
from app import create_app
from app.navigation import (
CaseworkNavigation,
HeaderNavigation,
MainNavigation,
Navigation,
OrgNavigation,
)
from tests.conftest import ORGANISATION_ID, SERVICE_ONE_ID, normalize_spaces
EXCLUDED_ENDPOINTS = tuple(
map(
Navigation.get_endpoint_with_blueprint,
{
"accept_invite",
"accept_org_invite",
"accessibility_statement",
"action_blocked",
"add_data_retention",
"add_organisation",
"add_organisation_email_branding_options",
"add_organisation_from_gp_service",
"add_organisation_from_nhs_local_service",
"add_organisation_letter_branding_options",
"add_service",
"add_service_template",
"api_callbacks",
"api_documentation",
"api_integration",
"api_keys",
"approve_broadcast_message",
"archive_organisation",
"archive_service",
"archive_user",
"bat_phone",
"begin_tour",
"billing_details",
"branding_and_customisation",
"broadcast",
"broadcast_dashboard",
"broadcast_dashboard_previous",
"broadcast_dashboard_rejected",
"broadcast_dashboard_updates",
"broadcast_tour",
"broadcast_tour_live",
"callbacks",
"cancel_broadcast_message",
"cancel_invited_org_user",
"cancel_invited_user",
"cancel_job",
"cancel_letter",
"cancel_letter_job",
"change_user_auth",
"check_and_resend_text_code",
"check_and_resend_verification_code",
"check_contact_list",
"check_messages",
"check_notification",
"check_tour_notification",
"choose_account",
"choose_broadcast_area",
"choose_broadcast_library",
"choose_broadcast_sub_area",
"choose_from_contact_list",
"choose_service",
"choose_template",
"choose_template_to_copy",
"clear_cache",
"confirm_edit_user_email",
"confirm_edit_user_mobile_number",
"confirm_redact_template",
"contact_list",
"conversation",
"conversation_reply",
"conversation_reply_with_template",
"conversation_updates",
"cookies",
"copy_template",
"count_content_length",
"create_and_send_messages",
"create_api_key",
"create_email_branding",
"create_email_branding_government_identity_logo",
"create_email_branding_government_identity_colour",
"create_letter_branding",
"data_retention",
"delete_contact_list",
"delete_service_template",
"delete_template_folder",
"delivery_and_failure",
"delivery_status_callback",
"design_content",
"documentation",
"download_contact_list",
"download_notifications_csv",
"download_organisation_usage_report",
"edit_and_format_messages",
"edit_data_retention",
"edit_organisation_agreement",
"edit_organisation_billing_details",
"edit_organisation_crown_status",
"edit_organisation_domains",
"edit_organisation_go_live_notes",
"edit_organisation_name",
"edit_organisation_notes",
"edit_organisation_type",
"edit_organisation_user",
"edit_service_billing_details",
"edit_service_notes",
"edit_service_template",
"edit_sms_provider_ratio",
"edit_template_postage",
"edit_user_email",
"edit_user_mobile_number",
"edit_user_permissions",
"email_branding",
"email_branding_choose_banner_colour",
"email_branding_choose_banner_type",
"email_branding_choose_logo",
"email_branding_confirm_upload_logo",
"email_branding_enter_government_identity_logo_text",
"email_branding_govuk",
"email_branding_govuk_and_org",
"email_branding_nhs",
"email_branding_organisation",
"email_branding_pool_option",
"email_branding_request",
"email_branding_request_government_identity_logo",
"email_branding_something_else",
"email_branding_upload_logo",
"email_not_received",
"email_template",
"error",
"estimate_usage",
"features",
"features_email",
"features_letters",
"features_sms",
"feedback",
"find_services_by_name",
"find_users_by_email",
"forgot_password",
"get_billing_report",
"get_daily_volumes",
"get_daily_sms_provider_volumes",
"get_volumes_by_service",
"get_example_csv",
"get_notifications_as_json",
"get_started",
"get_started_old",
"go_to_dashboard_after_tour",
"guest_list",
"guidance_index",
"history",
"how_to_pay",
"inbound_sms_admin",
"inbox",
"inbox_download",
"inbox_updates",
"index",
"information_risk_management",
"information_security",
"integration_testing",
"invite_org_user",
"invite_user",
"letter_branding",
"letter_branding_request",
"letter_spec",
"letter_specification",
"letter_template",
"link_service_to_organisation",
"live_services",
"live_services_csv",
"manage_org_users",
"manage_template_folder",
"manage_users",
"message_status",
"monthly",
"new_broadcast",
"new_password",
"no_cookie.check_messages_preview",
"no_cookie.check_notification_preview",
"no_cookie.letter_branding_preview_image",
"no_cookie.send_test_preview",
"no_cookie.view_letter_template_preview",
"no_cookie.view_template_version_preview",
"notifications_sent_by_service",
"old_guest_list",
"old_integration_testing",
"old_roadmap",
"old_service_dashboard",
"old_terms",
"old_using_notify",
"organisation_billing",
"organisation_dashboard",
"organisation_download_agreement",
"organisation_email_branding",
"organisation_letter_branding",
"organisation_settings",
"organisation_trial_mode_services",
"organisations",
"performance",
"platform_admin",
"platform_admin_list_complaints",
"platform_admin_reports",
"platform_admin_returned_letters",
"platform_admin_splash_page",
"preview_broadcast_areas",
"preview_broadcast_message",
"pricing",
"privacy",
"public_agreement",
"public_download_agreement",
"received_text_messages_callback",
"redact_template",
"register",
"register_from_invite",
"register_from_org_invite",
"registration_continue",
"reject_broadcast_message",
"remove_broadcast_area",
"remove_user_from_organisation",
"remove_user_from_service",
"request_to_go_live",
"resend_email_link",
"resend_email_verification",
"returned_letter_summary",
"returned_letters",
"returned_letters_report",
"revalidate_email_sent",
"revoke_api_key",
"roadmap",
"save_contact_list",
"security",
"security_policy",
"send_files_by_email",
"send_files_by_email_contact_details",
"send_from_contact_list",
"send_messages",
"send_notification",
"send_one_off",
"send_one_off_letter_address",
"send_one_off_step",
"send_one_off_to_myself",
"send_uploaded_letter",
"service_accept_agreement",
"service_add_email_reply_to",
"service_add_letter_contact",
"service_add_sms_sender",
"service_agreement",
"service_confirm_agreement",
"service_confirm_delete_email_reply_to",
"service_confirm_delete_letter_contact",
"service_confirm_delete_sms_sender",
"service_dashboard",
"service_dashboard_updates",
"service_delete_email_reply_to",
"service_delete_letter_contact",
"service_delete_sms_sender",
"service_download_agreement",
"service_edit_email_reply_to",
"service_edit_letter_contact",
"service_edit_sms_sender",
"service_email_reply_to",
"service_letter_contact_details",
"service_make_blank_default_letter_contact",
"service_name_change",
"service_preview_email_branding",
"service_preview_letter_branding",
"service_set_auth_type",
"service_confirm_broadcast_account_type",
"service_set_broadcast_channel",
"service_set_broadcast_network",
"service_set_channel",
"service_set_email_branding",
"service_set_email_branding_add_to_branding_pool_step",
"service_set_inbound_number",
"service_set_inbound_sms",
"service_set_international_letters",
"service_set_international_sms",
"service_set_letter_branding",
"service_set_letters",
"service_set_permission",
"service_set_reply_to_email",
"service_set_sms_prefix",
"service_settings",
"service_sms_senders",
"service_switch_count_as_live",
"service_switch_live",
"service_verify_reply_to_address",
"service_verify_reply_to_address_updates",
"services_or_dashboard",
"set_free_sms_allowance",
"set_message_limit",
"set_rate_limit",
"set_sender",
"set_template_sender",
"show_accounts_or_dashboard",
"sign_in",
"sign_out",
"start_job",
"submit_request_to_go_live",
"support",
"support_public",
"template_history",
"template_usage",
"terms",
"thanks",
"tour_step",
"triage",
"trial_mode",
"trial_mode_new",
"trial_services",
"two_factor_sms",
"two_factor_email",
"two_factor_email_interstitial",
"two_factor_email_sent",
"two_factor_webauthn",
"update_email_branding",
"update_letter_branding",
"upload_a_letter",
"upload_contact_list",
"upload_letter",
"uploaded_letter_preview",
"uploaded_letters",
"uploads",
"usage",
"user_information",
"user_profile",
"user_profile_confirm_delete_mobile_number",
"user_profile_confirm_delete_security_key",
"user_profile_delete_security_key",
"user_profile_disable_platform_admin_view",
"user_profile_email",
"user_profile_email_authenticate",
"user_profile_email_confirm",
"user_profile_manage_security_key",
"user_profile_mobile_number",
"user_profile_mobile_number_authenticate",
"user_profile_mobile_number_confirm",
"user_profile_mobile_number_delete",
"user_profile_name",
"user_profile_password",
"user_profile_security_keys",
"using_notify",
"verify",
"verify_email",
"view_current_broadcast",
"view_job",
"view_job_csv",
"view_job_updates",
"view_jobs",
"view_letter_notification_as_preview",
"view_letter_upload_as_preview",
"view_notification",
"view_notification_updates",
"view_notifications",
"view_notifications_csv",
"view_previous_broadcast",
"view_provider",
"view_providers",
"view_rejected_broadcast",
"view_template",
"view_template_version",
"view_template_versions",
"webauthn_begin_register",
"webauthn_complete_register",
"webauthn_begin_authentication",
"webauthn_complete_authentication",
"who_can_use_notify",
"who_its_for",
"write_new_broadcast",
},
)
)
def flask_app():
    # Helper generator (not a pytest fixture): builds the real app and pushes
    # an app context. It is only ever advanced once — via next(flask_app())
    # below — so the pushed context is never popped and stays active for the
    # module's lifetime.
    app = Flask("app")
    create_app(app)

    ctx = app.app_context()
    ctx.push()
    yield app
# Every routable endpoint of the real app, collected once at import time so
# the tests below can cross-check the navigation mappings against them.
all_endpoints = [rule.endpoint for rule in next(flask_app()).url_map.iter_rules()]

# One instance of each navigation class under test.
navigation_instances = (
    MainNavigation(),
    HeaderNavigation(),
    OrgNavigation(),
    CaseworkNavigation(),
)
@pytest.mark.parametrize(
    "navigation_instance", navigation_instances, ids=(x.__class__.__name__ for x in navigation_instances)
)
def test_navigation_items_are_properly_defined(navigation_instance):
    """Every endpoint in a navigation mapping must be a real endpoint, listed once.

    Uses f-strings for the assertion messages, consistent with the rest of
    this module (the messages themselves are unchanged).
    """
    nav_name = type(navigation_instance).__name__
    for endpoint in navigation_instance.endpoints_with_navigation:
        assert endpoint in all_endpoints, f"{endpoint} is not a real endpoint (in {nav_name}.mapping)"
        assert (
            navigation_instance.endpoints_with_navigation.count(endpoint) == 1
        ), f"{endpoint} found more than once in {nav_name}.mapping"
def test_excluded_navigation_items_are_properly_defined():
    """EXCLUDED_ENDPOINTS must only name real endpoints, each listed once."""
    for endpoint in EXCLUDED_ENDPOINTS:
        assert endpoint in all_endpoints, f"{endpoint} is not a real endpoint (in EXCLUDED_ENDPOINTS)"
        assert EXCLUDED_ENDPOINTS.count(endpoint) == 1, f"{endpoint} found more than once in EXCLUDED_ENDPOINTS"
@pytest.mark.parametrize(
    "navigation_instance", navigation_instances, ids=(x.__class__.__name__ for x in navigation_instances)
)
def test_all_endpoints_are_covered(navigation_instance):
    """Every app endpoint is either in a navigation mapping or explicitly excluded."""
    covered_endpoints = (
        # "static", the status page and metrics are infrastructure endpoints,
        # never part of any navigation.
        navigation_instance.endpoints_with_navigation + EXCLUDED_ENDPOINTS + ("static", "status.show_status", "metrics")
    )
    for endpoint in all_endpoints:
        assert endpoint in covered_endpoints, f"{endpoint} is not listed or excluded"
@pytest.mark.parametrize(
    "navigation_instance", navigation_instances, ids=(x.__class__.__name__ for x in navigation_instances)
)
@pytest.mark.xfail(raises=KeyError)
def test_raises_on_invalid_navigation_item(client_request, navigation_instance):
    """is_selected must raise KeyError for an unknown navigation item."""
    navigation_instance.is_selected("foo")
@pytest.mark.parametrize(
    "endpoint, selected_nav_item",
    [
        ("main.choose_template", "Templates"),
        ("main.manage_users", "Team members"),
    ],
)
def test_a_page_should_nave_selected_navigation_item(
    client_request,
    mock_get_service_templates,
    mock_get_users_by_service,
    mock_get_invites_for_service,
    mock_get_template_folders,
    mock_get_api_keys,
    endpoint,
    selected_nav_item,
):
    """Exactly one sidebar navigation item is highlighted on a service page.

    NOTE(review): "nave" in the test name looks like a typo for "have" —
    shared with the two sibling tests below; rename all three together.
    """
    page = client_request.get(endpoint, service_id=SERVICE_ONE_ID)
    selected_nav_items = page.select(".navigation a.selected")
    assert len(selected_nav_items) == 1
    assert selected_nav_items[0].text.strip() == selected_nav_item
@pytest.mark.parametrize(
"endpoint, selected_nav_item",
[
("main.documentation", "Documentation"),
("main.support", "Support"),
],
)
def test_a_page_should_nave_selected_header_navigation_item(
client_request,
endpoint,
selected_nav_item,
):
page = client_request.get(endpoint, service_id=SERVICE_ONE_ID)
selected_nav_items = page.select(".govuk-header__navigation-item--active")
assert len(selected_nav_items) == 1
assert selected_nav_items[0].text.strip() == selected_nav_item
@pytest.mark.parametrize(
"endpoint, selected_nav_item",
[
("main.organisation_dashboard", "Usage"),
("main.manage_org_users", "Team members"),
],
)
def test_a_page_should_nave_selected_org_navigation_item(
client_request,
mock_get_organisation,
mock_get_users_for_organisation,
mock_get_invited_users_for_organisation,
endpoint,
selected_nav_item,
mocker,
):
mocker.patch("app.organisations_client.get_services_and_usage", return_value={"services": {}})
page = client_request.get(endpoint, org_id=ORGANISATION_ID)
selected_nav_items = page.select(".navigation a.selected")
assert len(selected_nav_items) == 1
assert selected_nav_items[0].text.strip() == selected_nav_item
def test_navigation_urls(
    client_request,
    mock_get_service_templates,
    mock_get_template_folders,
    mock_get_api_keys,
):
    """A default service's sidebar links appear in this exact order with these hrefs."""
    page = client_request.get("main.choose_template", service_id=SERVICE_ONE_ID)
    assert [a["href"] for a in page.select(".navigation a")] == [
        "/services/{}".format(SERVICE_ONE_ID),
        "/services/{}/templates".format(SERVICE_ONE_ID),
        "/services/{}/uploads".format(SERVICE_ONE_ID),
        "/services/{}/users".format(SERVICE_ONE_ID),
        "/services/{}/usage".format(SERVICE_ONE_ID),
        "/services/{}/service-settings".format(SERVICE_ONE_ID),
        "/services/{}/api".format(SERVICE_ONE_ID),
    ]
def test_navigation_for_services_with_broadcast_permission(
client_request,
service_one,
mock_get_service_templates,
mock_get_template_folders,
mock_get_api_keys,
active_user_create_broadcasts_permission,
):
service_one["permissions"] += ["broadcast"]
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get("main.choose_template", service_id=SERVICE_ONE_ID)
assert [a["href"] for a in page.select(".navigation a")] == [
"/services/{}/current-alerts".format(SERVICE_ONE_ID),
"/services/{}/past-alerts".format(SERVICE_ONE_ID),
"/services/{}/rejected-alerts".format(SERVICE_ONE_ID),
"/services/{}/templates".format(SERVICE_ONE_ID),
"/services/{}/users".format(SERVICE_ONE_ID),
]
def test_navigation_for_services_with_broadcast_permission_platform_admin(
client_request,
service_one,
mock_get_service_templates,
mock_get_template_folders,
mock_get_api_keys,
platform_admin_user,
):
service_one["permissions"] += ["broadcast"]
client_request.login(platform_admin_user)
page = client_request.get("main.choose_template", service_id=SERVICE_ONE_ID)
assert [a["href"] for a in page.select(".navigation a")] == [
"/services/{}/current-alerts".format(SERVICE_ONE_ID),
"/services/{}/past-alerts".format(SERVICE_ONE_ID),
"/services/{}/rejected-alerts".format(SERVICE_ONE_ID),
"/services/{}/templates".format(SERVICE_ONE_ID),
"/services/{}/users".format(SERVICE_ONE_ID),
"/services/{}/service-settings".format(SERVICE_ONE_ID),
"/services/{}/api/keys".format(SERVICE_ONE_ID),
]
def test_caseworkers_get_caseworking_navigation(
    client_request,
    mock_get_template_folders,
    mock_get_service_templates,
    mock_has_no_jobs,
    mock_get_api_keys,
    active_caseworking_user,
):
    """Caseworking users see the reduced caseworker navigation, not the full service sidebar."""
    client_request.login(active_caseworking_user)
    page = client_request.get("main.choose_template", service_id=SERVICE_ONE_ID)
    assert normalize_spaces(page.select_one("header + .govuk-width-container nav").text) == (
        "Templates Sent messages Uploads Team members"
    )
def test_caseworkers_see_jobs_nav_if_jobs_exist(
    client_request,
    mock_get_service_templates,
    mock_get_template_folders,
    mock_has_jobs,
    active_caseworking_user,
    mock_get_api_keys,
):
    """Caseworker navigation when the service *does* have jobs (mock_has_jobs)."""
    client_request.login(active_caseworking_user)
    page = client_request.get("main.choose_template", service_id=SERVICE_ONE_ID)
    # NOTE(review): the expected nav text is identical to the no-jobs case above —
    # confirm whether a jobs-specific item was meant to be asserted here.
    assert normalize_spaces(page.select_one("header + .govuk-width-container nav").text) == (
        "Templates Sent messages Uploads Team members"
    )
| mit | 90b49fcfa7a3a9f46d9787caaa36228c | 34.289562 | 120 | 0.57089 | 4.028055 | false | false | false | false |
samuelcolvin/pydantic | tests/mypy/modules/plugin_fail.py | 1 | 4101 | from typing import Any, Generic, Optional, Set, TypeVar, Union
from pydantic import BaseModel, BaseSettings, Extra, Field
from pydantic.dataclasses import dataclass
from pydantic.generics import GenericModel
class Model(BaseModel):  # mypy-plugin fixture: strict model (no mutation, extras forbidden)
    x: int
    y: str
    def method(self) -> None:
        pass
    class Config:
        alias_generator = None
        allow_mutation = False  # field assignments below should be flagged by the plugin
        extra = Extra.forbid  # unknown init kwargs below should be flagged by the plugin
        def config_method(self) -> None:
            ...
model = Model(x=1, y='y', z='z')  # plugin should flag: 'z' is not a field (extra = forbid)
model = Model(x=1)  # plugin should flag: required field 'y' missing
model.y = 'a'  # plugin should flag: allow_mutation is False
Model.from_orm({})  # plugin should flag: orm_mode is not enabled on Model
Model.from_orm({}) # type: ignore[pydantic-orm] # noqa F821
class ForbidExtraModel(BaseModel):
class Config:
extra = 'forbid'
ForbidExtraModel(x=1)
class ForbidExtraModel2(BaseModel):
class Config:
extra = 'forbid'
validate_all = False
Config.validate_all = True
ForbidExtraModel2(x=1)
class BadExtraModel(BaseModel):
class Config:
extra = 1 # type: ignore[pydantic-config] # noqa F821
extra = 1
class BadConfig1(BaseModel):
class Config:
orm_mode: Any = {} # not sensible, but should still be handled gracefully
class BadConfig2(BaseModel):
class Config:
orm_mode = list # not sensible, but should still be handled gracefully
class InheritingModel(Model):
class Config:
allow_mutation = True
class DefaultTestingModel(BaseModel):
# Required
a: int
b: int = ...
c: int = Field(...)
d: Union[int, str]
e = ...
# Not required
f: Optional[int]
g: int = 1
h: int = Field(1)
i: int = Field(None)
j = 1
DefaultTestingModel()
class UndefinedAnnotationModel(BaseModel):
undefined: Undefined # noqa F821
UndefinedAnnotationModel()
class Settings(BaseSettings):
x: int
Model.construct(x=1)
Model.construct(_fields_set={'x'}, x=1, y='2')
Model.construct(x='1', y='2')
Settings() # should pass here due to possibly reading from environment
# Strict mode fails
inheriting = InheritingModel(x='1', y='1')
Settings(x='1')
Model(x='1', y='2')
class Blah(BaseModel):
fields_set: Optional[Set[str]] = None
# (comment to keep line numbers unchanged)
T = TypeVar('T')
class Response(GenericModel, Generic[T]):
data: T
error: Optional[str]
response = Response[Model](data=model, error=None)
response = Response[Model](data=1, error=None)
class AliasModel(BaseModel):
x: str = Field(..., alias='y')
z: int
AliasModel(y=1, z=2)
x_alias = 'y'
class DynamicAliasModel(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
DynamicAliasModel(y='y', z='1')
class DynamicAliasModel2(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
class Config:
allow_population_by_field_name = True
DynamicAliasModel2(y='y', z=1)
DynamicAliasModel2(x='y', z=1)
class AliasGeneratorModel(BaseModel):
x: int
class Config:
alias_generator = lambda x: x + '_' # noqa E731
AliasGeneratorModel(x=1)
AliasGeneratorModel(x_=1)
AliasGeneratorModel(z=1)
class AliasGeneratorModel2(BaseModel):
x: int = Field(..., alias='y')
class Config: # type: ignore[pydantic-alias] # noqa F821
alias_generator = lambda x: x + '_' # noqa E731
class UntypedFieldModel(BaseModel):
x: int = 1
y = 2
z = 2 # type: ignore[pydantic-field] # noqa F821
AliasGeneratorModel2(x=1)
AliasGeneratorModel2(y=1, z=1)
class CoverageTester(Missing): # noqa F821
def from_orm(self) -> None:
pass
CoverageTester().from_orm()
@dataclass(config={})
class AddProject:
name: str
slug: Optional[str]
description: Optional[str]
p = AddProject(name='x', slug='y', description='z')
# Same as Model, but with frozen = True
class FrozenModel(BaseModel):
x: int
y: str
class Config:
alias_generator = None
frozen = True
extra = Extra.forbid
frozenmodel = FrozenModel(x=1, y='b')
frozenmodel.y = 'a'
class InheritingModel2(FrozenModel):
class Config:
frozen = False
inheriting2 = InheritingModel2(x=1, y='c')
inheriting2.y = 'd'
| mit | 4ac727aee62fbf2e22a35d3f2fe7d689 | 16.986842 | 82 | 0.644477 | 3.252181 | false | true | false | false |
samuelcolvin/pydantic | pydantic/json.py | 1 | 3300 | import datetime
from collections import deque
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from re import Pattern
from types import GeneratorType
from typing import Any, Callable, Dict, Type, Union
from uuid import UUID
from .color import Color
from .networks import NameEmail
from .types import SecretBytes, SecretStr
__all__ = 'pydantic_encoder', 'custom_pydantic_encoder', 'timedelta_isoformat'
def isoformat(o: Union[datetime.date, datetime.time]) -> str:
    """Render a date, time or datetime as its ISO-8601 string."""
    encoded: str = o.isoformat()
    return encoded
def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
    """
    Encode a Decimal as an int when it has no fractional digits, else as a float.

    Useful when ConstrainedDecimal represents Numeric(x, 0) columns, where the
    value is integral but not int-typed; encoding such values as floats breaks
    round-tripping between encode and parse (our Id type is a prime example).

    >>> decimal_encoder(Decimal("1.0"))
    1.0
    >>> decimal_encoder(Decimal("1"))
    1
    """
    exponent = dec_value.as_tuple().exponent
    # A non-negative exponent means no digits after the decimal point.
    return int(dec_value) if exponent >= 0 else float(dec_value)
# Maps a type to a callable converting its instances to something the stdlib
# json encoder understands.  pydantic_encoder walks each object's MRO, so an
# entry also covers subclasses (e.g. the Enum entry covers concrete enums).
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
    bytes: lambda o: o.decode(),  # decodes with the default codec (UTF-8)
    Color: str,
    datetime.date: isoformat,
    datetime.datetime: isoformat,
    datetime.time: isoformat,
    datetime.timedelta: lambda td: td.total_seconds(),  # duration as a float of seconds
    Decimal: decimal_encoder,
    Enum: lambda o: o.value,
    frozenset: list,
    deque: list,
    GeneratorType: list,  # NOTE: materializing consumes the generator
    IPv4Address: str,
    IPv4Interface: str,
    IPv4Network: str,
    IPv6Address: str,
    IPv6Interface: str,
    IPv6Network: str,
    NameEmail: str,
    Path: str,
    Pattern: lambda o: o.pattern,  # serialize a compiled regex as its source pattern
    SecretBytes: str,  # NOTE(review): str() of Secret* appears to emit the masked form — confirm in pydantic.types
    SecretStr: str,
    set: list,
    UUID: str,
}
def pydantic_encoder(obj: Any) -> Any:
    """Default ``json.dumps(default=...)`` hook: serialize pydantic models,
    dataclasses, and the types registered in ENCODERS_BY_TYPE.

    Raises TypeError (like the stdlib encoder) when no encoder is known.
    """
    from dataclasses import asdict, is_dataclass

    from .main import BaseModel

    if isinstance(obj, BaseModel):
        return obj.dict()
    if is_dataclass(obj):
        return asdict(obj)

    # Walk the MRO (skipping ``object``) so a subclass picks up the encoder
    # registered for its closest ancestor.
    for klass in obj.__class__.__mro__[:-1]:
        encoder = ENCODERS_BY_TYPE.get(klass)
        if encoder is not None:
            return encoder(obj)

    # No class in the MRO had a registered encoder.
    raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any:
    """Like pydantic_encoder, but consults a caller-supplied type→encoder map
    first, falling back to the default encoder when no MRO entry matches."""
    _missing = object()  # sentinel: a stored value of None must still count as a hit
    for klass in obj.__class__.__mro__[:-1]:
        encoder = type_encoders.get(klass, _missing)
        if encoder is not _missing:
            return encoder(obj)
    # No class in the MRO had a custom encoder — defer to the defaults.
    return pydantic_encoder(obj)
def timedelta_isoformat(td: datetime.timedelta) -> str:
    """
    ISO 8601 encoding for timedeltas.

    Negative durations are rendered with a leading '-' sign, e.g.
    ``-P0DT1H0M0.000000S`` for ``timedelta(hours=-1)``.  Positive and zero
    durations are formatted exactly as before.
    """
    if td < datetime.timedelta(0):
        # Python normalizes negatives (e.g. -1h -> days=-1, seconds=82800),
        # which previously leaked into invalid output like 'P-1DT23H0M0.000000S'.
        return '-' + timedelta_isoformat(-td)
    minutes, seconds = divmod(td.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f'P{td.days}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
samuelcolvin/pydantic | tests/test_construction.py | 1 | 8888 | import pickle
from typing import Any, List, Optional
import pytest
from pydantic import BaseModel, Field, PrivateAttr
from pydantic.fields import Undefined
class Model(BaseModel):
    # Shared fixture for the construct/copy/pickle tests: 'a' is required, 'b' defaulted.
    a: float
    b: int = 10
def test_simple_construct():
    """construct() skips validation: passed fields land in __fields_set__, defaults fill the rest."""
    m = Model.construct(a=3.14)
    assert m.a == 3.14
    assert m.b == 10
    assert m.__fields_set__ == {'a'}
    assert m.dict() == {'a': 3.14, 'b': 10}
def test_construct_misuse():
    """Without validation, wrong types pass through and omitted required fields simply don't exist."""
    m = Model.construct(b='foobar')
    assert m.b == 'foobar'
    assert m.dict() == {'b': 'foobar'}
    with pytest.raises(AttributeError, match="'Model' object has no attribute 'a'"):
        print(m.a)
def test_construct_fields_set():
    """An explicit _fields_set overrides the default (the set of keys actually passed)."""
    m = Model.construct(a=3.0, b=-1, _fields_set={'a'})
    assert m.a == 3
    assert m.b == -1
    assert m.__fields_set__ == {'a'}
    assert m.dict() == {'a': 3, 'b': -1}
def test_construct_allow_extra():
"""construct() should allow extra fields"""
class Foo(BaseModel):
x: int
assert Foo.construct(x=1, y=2).dict() == {'x': 1, 'y': 2}
def test_construct_keep_order():
    """construct() must preserve field declaration order so dict()/json() match a validated instance."""
    class Foo(BaseModel):
        a: int
        b: int = 42
        c: float
    instance = Foo(a=1, b=321, c=3.14)
    instance_construct = Foo.construct(**instance.dict())
    assert instance == instance_construct
    assert instance.dict() == instance_construct.dict()
    assert instance.json() == instance_construct.json()
def test_large_any_str():
class Model(BaseModel):
a: bytes
b: str
content_bytes = b'x' * (2**16 + 1)
content_str = 'x' * (2**16 + 1)
m = Model(a=content_bytes, b=content_str)
assert m.a == content_bytes
assert m.b == content_str
def test_simple_copy():
m = Model(a=24)
m2 = m.copy()
assert m.a == m2.a == 24
assert m.b == m2.b == 10
assert m == m2
assert m.__fields__ == m2.__fields__
class ModelTwo(BaseModel):
    # __foo__ is a private attribute; the copy/pickle tests below check it is carried over.
    __foo__ = PrivateAttr({'private'})
    a: float
    b: int = 10
    c: str = 'foobar'
    d: Model  # nested model, used to exercise deep vs shallow copying
def test_deep_copy():
m = ModelTwo(a=24, d=Model(a='12'))
m.__foo__ = {'new value'}
m2 = m.copy(deep=True)
assert m.a == m2.a == 24
assert m.b == m2.b == 10
assert m.c == m2.c == 'foobar'
assert m.d is not m2.d
assert m == m2
assert m.__fields__ == m2.__fields__
assert m.__foo__ == m2.__foo__
assert m.__foo__ is not m2.__foo__
def test_copy_exclude():
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.copy(exclude={'b'})
assert m.a == m2.a == 24
assert isinstance(m2.d, Model)
assert m2.d.a == 12
assert hasattr(m2, 'c')
assert not hasattr(m2, 'b')
assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'}
assert set(m2.dict().keys()) == {'a', 'c', 'd'}
assert m != m2
def test_copy_include():
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.copy(include={'a'})
assert m.a == m2.a == 24
assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'}
assert set(m2.dict().keys()) == {'a'}
assert m != m2
def test_copy_include_exclude():
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.copy(include={'a', 'b', 'c'}, exclude={'c'})
assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'}
assert set(m2.dict().keys()) == {'a', 'b'}
def test_copy_advanced_exclude():
class SubSubModel(BaseModel):
a: str
b: str
class SubModel(BaseModel):
c: str
d: List[SubSubModel]
class Model(BaseModel):
e: str
f: SubModel
m = Model(e='e', f=SubModel(c='foo', d=[SubSubModel(a='a', b='b'), SubSubModel(a='c', b='e')]))
m2 = m.copy(exclude={'f': {'c': ..., 'd': {-1: {'a'}}}})
assert hasattr(m.f, 'c')
assert not hasattr(m2.f, 'c')
assert m2.dict() == {'e': 'e', 'f': {'d': [{'a': 'a', 'b': 'b'}, {'b': 'e'}]}}
m2 = m.copy(exclude={'e': ..., 'f': {'d'}})
assert m2.dict() == {'f': {'c': 'foo'}}
def test_copy_advanced_include():
class SubSubModel(BaseModel):
a: str
b: str
class SubModel(BaseModel):
c: str
d: List[SubSubModel]
class Model(BaseModel):
e: str
f: SubModel
m = Model(e='e', f=SubModel(c='foo', d=[SubSubModel(a='a', b='b'), SubSubModel(a='c', b='e')]))
m2 = m.copy(include={'f': {'c'}})
assert hasattr(m.f, 'c')
assert hasattr(m2.f, 'c')
assert m2.dict() == {'f': {'c': 'foo'}}
m2 = m.copy(include={'e': ..., 'f': {'d': {-1}}})
assert m2.dict() == {'e': 'e', 'f': {'d': [{'a': 'c', 'b': 'e'}]}}
def test_copy_advanced_include_exclude():
class SubSubModel(BaseModel):
a: str
b: str
class SubModel(BaseModel):
c: str
d: List[SubSubModel]
class Model(BaseModel):
e: str
f: SubModel
m = Model(e='e', f=SubModel(c='foo', d=[SubSubModel(a='a', b='b'), SubSubModel(a='c', b='e')]))
m2 = m.copy(include={'e': ..., 'f': {'d'}}, exclude={'e': ..., 'f': {'d': {0}}})
assert m2.dict() == {'f': {'d': [{'a': 'c', 'b': 'e'}]}}
def test_copy_update():
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.copy(update={'a': 'different'})
assert m.a == 24
assert m2.a == 'different'
assert set(m.dict().keys()) == set(m2.dict().keys()) == {'a', 'b', 'c', 'd'}
assert m != m2
def test_copy_update_unset():
class Foo(BaseModel):
foo: Optional[str]
bar: Optional[str]
assert Foo(foo='hello').copy(update={'bar': 'world'}).json(exclude_unset=True) == '{"foo": "hello", "bar": "world"}'
def test_copy_set_fields():
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.copy()
assert m.dict(exclude_unset=True) == {'a': 24.0, 'd': {'a': 12}}
assert m.dict(exclude_unset=True) == m2.dict(exclude_unset=True)
def test_simple_pickle():
m = Model(a='24')
b = pickle.dumps(m)
m2 = pickle.loads(b)
assert m.a == m2.a == 24
assert m.b == m2.b == 10
assert m == m2
assert m is not m2
assert tuple(m) == (('a', 24.0), ('b', 10))
assert tuple(m2) == (('a', 24.0), ('b', 10))
assert m.__fields__ == m2.__fields__
def test_recursive_pickle():
m = ModelTwo(a=24, d=Model(a='123.45'))
m2 = pickle.loads(pickle.dumps(m))
assert m == m2
assert m.d.a == 123.45
assert m2.d.a == 123.45
assert m.__fields__ == m2.__fields__
assert m.__foo__ == m2.__foo__
def test_pickle_undefined():
m = ModelTwo(a=24, d=Model(a='123.45'))
m2 = pickle.loads(pickle.dumps(m))
assert m2.__foo__ == {'private'}
m.__foo__ = Undefined
m3 = pickle.loads(pickle.dumps(m))
assert not hasattr(m3, '__foo__')
def test_copy_undefined():
m = ModelTwo(a=24, d=Model(a='123.45'))
m2 = m.copy()
assert m2.__foo__ == {'private'}
m.__foo__ = Undefined
m3 = m.copy()
assert not hasattr(m3, '__foo__')
def test_immutable_copy_with_allow_mutation():
class Model(BaseModel):
a: int
b: int
class Config:
allow_mutation = False
m = Model(a=40, b=10)
assert m == m.copy()
m2 = m.copy(update={'b': 12})
assert repr(m2) == 'Model(a=40, b=12)'
with pytest.raises(TypeError):
m2.b = 13
def test_immutable_copy_with_frozen():
class Model(BaseModel):
a: int
b: int
class Config:
frozen = True
m = Model(a=40, b=10)
assert m == m.copy()
m2 = m.copy(update={'b': 12})
assert repr(m2) == 'Model(a=40, b=12)'
with pytest.raises(TypeError):
m2.b = 13
def test_pickle_fields_set():
m = Model(a=24)
assert m.dict(exclude_unset=True) == {'a': 24}
m2 = pickle.loads(pickle.dumps(m))
assert m2.dict(exclude_unset=True) == {'a': 24}
def test_copy_update_exclude():
class SubModel(BaseModel):
a: str
b: str
class Model(BaseModel):
c: str
d: SubModel
m = Model(c='ex', d=dict(a='ax', b='bx'))
assert m.dict() == {'c': 'ex', 'd': {'a': 'ax', 'b': 'bx'}}
assert m.copy(exclude={'c'}).dict() == {'d': {'a': 'ax', 'b': 'bx'}}
assert m.copy(exclude={'c'}, update={'c': 42}).dict() == {'c': 42, 'd': {'a': 'ax', 'b': 'bx'}}
assert m._calculate_keys(exclude={'x': ...}, include=None, exclude_unset=False) == {'c', 'd'}
assert m._calculate_keys(exclude={'x': ...}, include=None, exclude_unset=False, update={'c': 42}) == {'d'}
def test_shallow_copy_modify():
    """copy() is shallow: scalar reassignment is independent, but nested objects are shared."""
    class X(BaseModel):
        val: int
        deep: Any
    x = X(val=1, deep={'deep_thing': [1, 2]})
    y = x.copy()
    y.val = 2
    y.deep['deep_thing'].append(3)
    assert x.val == 1
    assert y.val == 2
    # deep['deep_thing'] gets modified
    assert x.deep['deep_thing'] == [1, 2, 3]
    assert y.deep['deep_thing'] == [1, 2, 3]
def test_construct_default_factory():
    """construct() honours default_factory for fields that were not supplied."""
    class Model(BaseModel):
        foo: List[int] = Field(default_factory=list)
        bar: str = 'Baz'
    m = Model.construct()
    assert m.foo == []
    assert m.bar == 'Baz'
| mit | df3205921ed096ff587cd3d3ff056e19 | 23.620499 | 120 | 0.526553 | 2.898891 | false | true | false | false |
samuelcolvin/pydantic | tests/test_networks_ipaddress.py | 1 | 17309 | from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
import pytest
from pydantic import BaseModel, IPvAnyAddress, IPvAnyInterface, IPvAnyNetwork, ValidationError
#
# ipaddress.IPv4Address
# ipaddress.IPv6Address
# pydantic.IPvAnyAddress
#
@pytest.mark.parametrize(
'value,cls',
[
('0.0.0.0', IPv4Address),
('1.1.1.1', IPv4Address),
('10.10.10.10', IPv4Address),
('192.168.0.1', IPv4Address),
('255.255.255.255', IPv4Address),
('::1:0:1', IPv6Address),
('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', IPv6Address),
(b'\x00\x00\x00\x00', IPv4Address),
(b'\x01\x01\x01\x01', IPv4Address),
(b'\n\n\n\n', IPv4Address),
(b'\xc0\xa8\x00\x01', IPv4Address),
(b'\xff\xff\xff\xff', IPv4Address),
(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01', IPv6Address),
(b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Address),
(0, IPv4Address),
(16_843_009, IPv4Address),
(168_430_090, IPv4Address),
(3_232_235_521, IPv4Address),
(4_294_967_295, IPv4Address),
(4_294_967_297, IPv6Address),
(340_282_366_920_938_463_463_374_607_431_768_211_455, IPv6Address),
(IPv4Address('192.168.0.1'), IPv4Address),
(IPv6Address('::1:0:1'), IPv6Address),
],
)
def test_ipaddress_success(value, cls):
class Model(BaseModel):
ip: IPvAnyAddress
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value',
[
'0.0.0.0',
'1.1.1.1',
'10.10.10.10',
'192.168.0.1',
'255.255.255.255',
b'\x00\x00\x00\x00',
b'\x01\x01\x01\x01',
b'\n\n\n\n',
b'\xc0\xa8\x00\x01',
b'\xff\xff\xff\xff',
0,
16_843_009,
168_430_090,
3_232_235_521,
4_294_967_295,
IPv4Address('0.0.0.0'),
IPv4Address('1.1.1.1'),
IPv4Address('10.10.10.10'),
IPv4Address('192.168.0.1'),
IPv4Address('255.255.255.255'),
],
)
def test_ipv4address_success(value):
class Model(BaseModel):
ipv4: IPv4Address
assert Model(ipv4=value).ipv4 == IPv4Address(value)
@pytest.mark.parametrize(
'value',
[
'::1:0:1',
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01',
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff',
4_294_967_297,
340_282_366_920_938_463_463_374_607_431_768_211_455,
IPv6Address('::1:0:1'),
IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'),
],
)
def test_ipv6address_success(value):
class Model(BaseModel):
ipv6: IPv6Address
assert Model(ipv6=value).ipv6 == IPv6Address(value)
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
-1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
2**128 + 1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
],
)
def test_ipaddress_fails(value, errors):
class Model(BaseModel):
ip: IPvAnyAddress
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
(-1, [{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}]),
(
2**32 + 1,
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
(
IPv6Address('::0:1:0'),
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
],
)
def test_ipv4address_fails(value, errors):
class Model(BaseModel):
ipv4: IPv4Address
with pytest.raises(ValidationError) as exc_info:
Model(ipv4=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(-1, [{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}]),
(
2**128 + 1,
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(
IPv4Address('192.168.0.1'),
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
],
)
def test_ipv6address_fails(value, errors):
class Model(BaseModel):
ipv6: IPv6Address
with pytest.raises(ValidationError) as exc_info:
Model(ipv6=value)
assert exc_info.value.errors() == errors
#
# ipaddress.IPv4Network
# ipaddress.IPv6Network
# pydantic.IPvAnyNetwork
#
@pytest.mark.parametrize(
    'value,cls',
    [
        ('192.168.0.0/24', IPv4Network),
        ('192.168.128.0/30', IPv4Network),
        ('2001:db00::0/120', IPv6Network),
        (2**32 - 1, IPv4Network), # no mask equals to mask /32
        (20_282_409_603_651_670_423_947_251_286_015, IPv6Network), # /128
        (b'\xff\xff\xff\xff', IPv4Network), # /32
        (b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Network),
        (('192.168.0.0', 24), IPv4Network),
        (('2001:db00::0', 120), IPv6Network),
        (IPv4Network('192.168.0.0/24'), IPv4Network),
    ],
)
def test_ipnetwork_success(value, cls):
    """IPvAnyNetwork accepts str/int/bytes/tuple/network inputs and picks the right family."""
    class Model(BaseModel):
        ip: IPvAnyNetwork = None
    assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,cls',
[
('192.168.0.0/24', IPv4Network),
('192.168.128.0/30', IPv4Network),
(2**32 - 1, IPv4Network), # no mask equals to mask /32
(b'\xff\xff\xff\xff', IPv4Network), # /32
(('192.168.0.0', 24), IPv4Network),
(IPv4Network('192.168.0.0/24'), IPv4Network),
],
)
def test_ip_v4_network_success(value, cls):
class Model(BaseModel):
ip: IPv4Network = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
    'value,cls',
    [
        ('2001:db00::0/120', IPv6Network),
        (20_282_409_603_651_670_423_947_251_286_015, IPv6Network), # /128
        (b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Network),
        (('2001:db00::0', 120), IPv6Network),
        (IPv6Network('2001:db00::0/120'), IPv6Network),
    ],
)
def test_ip_v6_network_success(value, cls):
    """An IPv6Network field accepts the same input shapes, restricted to the v6 family."""
    class Model(BaseModel):
        ip: IPv6Network = None
    assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}],
),
(
'192.168.0.1.1.1/24',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}],
),
(
-1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}],
),
(
2**128 + 1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}],
),
],
)
def test_ipnetwork_fails(value, errors):
class Model(BaseModel):
ip: IPvAnyNetwork = None
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}],
),
(
'192.168.0.1.1.1/24',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}],
),
(-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}]),
(
2**128 + 1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}],
),
(
'2001:db00::1/120',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}],
),
],
)
def test_ip_v4_network_fails(value, errors):
class Model(BaseModel):
ip: IPv4Network = None
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}],
),
(
'192.168.0.1.1.1/24',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}],
),
(-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}]),
(
2**128 + 1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}],
),
(
'192.168.0.1/24',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}],
),
],
)
def test_ip_v6_network_fails(value, errors):
class Model(BaseModel):
ip: IPv6Network = None
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
#
# ipaddress.IPv4Interface
# ipaddress.IPv6Interface
# pydantic.IPvAnyInterface
#
@pytest.mark.parametrize(
'value,cls',
[
('192.168.0.0/24', IPv4Interface),
('192.168.0.1/24', IPv4Interface),
('192.168.128.0/30', IPv4Interface),
('192.168.128.1/30', IPv4Interface),
('2001:db00::0/120', IPv6Interface),
('2001:db00::1/120', IPv6Interface),
(2**32 - 1, IPv4Interface), # no mask equals to mask /32
(2**32 - 1, IPv4Interface), # so ``strict`` has no effect
(20_282_409_603_651_670_423_947_251_286_015, IPv6Interface), # /128
(20_282_409_603_651_670_423_947_251_286_014, IPv6Interface),
(b'\xff\xff\xff\xff', IPv4Interface), # /32
(b'\xff\xff\xff\xff', IPv4Interface),
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Interface),
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Interface),
(('192.168.0.0', 24), IPv4Interface),
(('192.168.0.1', 24), IPv4Interface),
(('2001:db00::0', 120), IPv6Interface),
(('2001:db00::1', 120), IPv6Interface),
(IPv4Interface('192.168.0.0/24'), IPv4Interface),
(IPv4Interface('192.168.0.1/24'), IPv4Interface),
(IPv6Interface('2001:db00::0/120'), IPv6Interface),
(IPv6Interface('2001:db00::1/120'), IPv6Interface),
],
)
def test_ipinterface_success(value, cls):
class Model(BaseModel):
ip: IPvAnyInterface = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,cls',
[
('192.168.0.0/24', IPv4Interface),
('192.168.0.1/24', IPv4Interface),
('192.168.128.0/30', IPv4Interface),
('192.168.128.1/30', IPv4Interface),
(2**32 - 1, IPv4Interface), # no mask equals to mask /32
(2**32 - 1, IPv4Interface), # so ``strict`` has no effect
(b'\xff\xff\xff\xff', IPv4Interface), # /32
(b'\xff\xff\xff\xff', IPv4Interface),
(('192.168.0.0', 24), IPv4Interface),
(('192.168.0.1', 24), IPv4Interface),
(IPv4Interface('192.168.0.0/24'), IPv4Interface),
(IPv4Interface('192.168.0.1/24'), IPv4Interface),
],
)
def test_ip_v4_interface_success(value, cls):
class Model(BaseModel):
ip: IPv4Interface
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,cls',
[
('2001:db00::0/120', IPv6Interface),
('2001:db00::1/120', IPv6Interface),
(20_282_409_603_651_670_423_947_251_286_015, IPv6Interface), # /128
(20_282_409_603_651_670_423_947_251_286_014, IPv6Interface),
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Interface),
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Interface),
(('2001:db00::0', 120), IPv6Interface),
(('2001:db00::1', 120), IPv6Interface),
(IPv6Interface('2001:db00::0/120'), IPv6Interface),
(IPv6Interface('2001:db00::1/120'), IPv6Interface),
],
)
def test_ip_v6_interface_success(value, cls):
class Model(BaseModel):
ip: IPv6Interface = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[
{
'loc': ('ip',),
'msg': 'value is not a valid IPv4 or IPv6 interface',
'type': 'value_error.ipvanyinterface',
}
],
),
(
'192.168.0.1.1.1/24',
[
{
'loc': ('ip',),
'msg': 'value is not a valid IPv4 or IPv6 interface',
'type': 'value_error.ipvanyinterface',
}
],
),
(
-1,
[
{
'loc': ('ip',),
'msg': 'value is not a valid IPv4 or IPv6 interface',
'type': 'value_error.ipvanyinterface',
}
],
),
(
2**128 + 1,
[
{
'loc': ('ip',),
'msg': 'value is not a valid IPv4 or IPv6 interface',
'type': 'value_error.ipvanyinterface',
}
],
),
],
)
def test_ipinterface_fails(value, errors):
class Model(BaseModel):
ip: IPvAnyInterface = None
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
# IPv4Interface-specific rejection cases (same inputs as the AnyInterface test,
# but the error type/message are v4-specific).
@pytest.mark.parametrize(
    'value,errors',
    [
        (
            'hello,world',
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}],
        ),
        (
            '192.168.0.1.1.1/24',
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}],
        ),
        (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}]),
        (
            2**128 + 1,
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}],
        ),
    ],
)
def test_ip_v4_interface_fails(value, errors):
    """Invalid IPv4 interface values raise ValidationError with v4-typed errors."""
    class Model(BaseModel):
        ip: IPv4Interface = None
    with pytest.raises(ValidationError) as exc_info:
        Model(ip=value)
    assert exc_info.value.errors() == errors
# IPv6Interface-specific rejection cases, mirroring the v4 test above.
@pytest.mark.parametrize(
    'value,errors',
    [
        (
            'hello,world',
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}],
        ),
        (
            '192.168.0.1.1.1/24',
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}],
        ),
        (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}]),
        (
            2**128 + 1,
            [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}],
        ),
    ],
)
def test_ip_v6_interface_fails(value, errors):
    """Invalid IPv6 interface values raise ValidationError with v6-typed errors."""
    class Model(BaseModel):
        ip: IPv6Interface = None
    with pytest.raises(ValidationError) as exc_info:
        Model(ip=value)
    assert exc_info.value.errors() == errors
| mit | 649112612963a295b4eeba9723dd845a | 30.994455 | 120 | 0.533191 | 3.155122 | false | true | false | false |
samuelcolvin/pydantic | pydantic/tools.py | 1 | 2834 | import json
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union
from .parse import Protocol, load_file, load_str_bytes
from .types import StrBytes
from .typing import display_as_type
__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')
NameFactory = Union[str, Callable[[Type[Any]], str]]
if TYPE_CHECKING:
from .typing import DictStrAny
def _generate_parsing_type_name(type_: Any) -> str:
    """Build the synthetic model name used for an ad-hoc parsing model."""
    type_repr = display_as_type(type_)
    return 'ParsingModel[{}]'.format(type_repr)
@lru_cache(maxsize=2048)
def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
    """Return (and cache) a throwaway pydantic model that wraps ``type_`` as ``__root__``.

    NOTE: lru_cache requires ``type_`` and ``type_name`` to be hashable.
    """
    # Imported locally — presumably to avoid a circular import with pydantic.main; confirm.
    from pydantic.main import create_model
    if type_name is None:
        type_name = _generate_parsing_type_name
    # ``type_name`` may be a ready-made string or a factory called with the type.
    if not isinstance(type_name, str):
        type_name = type_name(type_)
    return create_model(type_name, __root__=(type_, ...))
T = TypeVar('T')
def parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:
    """Validate ``obj`` against an arbitrary ``type_`` via a one-field wrapper model."""
    model_type = _get_parsing_type(type_, type_name=type_name)  # type: ignore[arg-type]
    # Validation happens on construction; unwrap the __root__ field for the caller.
    return model_type(__root__=obj).__root__
def parse_file_as(
    type_: Type[T],
    path: Union[str, Path],
    *,
    content_type: str = None,
    encoding: str = 'utf8',
    proto: Protocol = None,
    allow_pickle: bool = False,
    json_loads: Callable[[str], Any] = json.loads,
    type_name: Optional[NameFactory] = None,
) -> T:
    """Read ``path``, deserialize it, and validate the result as ``type_``.

    ``proto``/``content_type`` select the wire format (JSON by default);
    ``allow_pickle`` must be set explicitly before pickle data is accepted.
    """
    obj = load_file(
        path,
        proto=proto,
        content_type=content_type,
        encoding=encoding,
        allow_pickle=allow_pickle,
        json_loads=json_loads,
    )
    return parse_obj_as(type_, obj, type_name=type_name)
def parse_raw_as(
    type_: Type[T],
    b: StrBytes,
    *,
    content_type: str = None,
    encoding: str = 'utf8',
    proto: Protocol = None,
    allow_pickle: bool = False,
    json_loads: Callable[[str], Any] = json.loads,
    type_name: Optional[NameFactory] = None,
) -> T:
    """Deserialize a raw str/bytes payload and validate it as ``type_``.

    Same knobs as :func:`parse_file_as`, but the payload is passed directly.
    """
    obj = load_str_bytes(
        b,
        proto=proto,
        content_type=content_type,
        encoding=encoding,
        allow_pickle=allow_pickle,
        json_loads=json_loads,
    )
    return parse_obj_as(type_, obj, type_name=type_name)
def schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':
    """Generate a JSON schema (as dict) for the passed model or dynamically generated one"""
    # ``title`` becomes the synthetic model name, hence the schema title.
    return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)
def schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:
    """Generate a JSON schema (as JSON) for the passed model or dynamically generated one"""
    # Extra kwargs (e.g. indent) are forwarded to .schema_json().
    return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)
| mit | d81cf21440c7a46b255bf3b5d91babd9 | 29.804348 | 105 | 0.645025 | 3.268743 | false | false | false | false |
samuelcolvin/pydantic | docs/examples/types_union_discriminated_nested.py | 1 | 1061 | from typing import Literal, Union
from typing_extensions import Annotated
from pydantic import BaseModel, Field, ValidationError
class BlackCat(BaseModel):
    # ``pet_type`` discriminates Cat vs Dog; ``color`` discriminates the cat variants.
    pet_type: Literal['cat']
    color: Literal['black']
    black_name: str
class WhiteCat(BaseModel):
    pet_type: Literal['cat']
    color: Literal['white']
    white_name: str
# Can also be written with a custom root type
#
# class Cat(BaseModel):
#     __root__: Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
# Inner discriminated union: picks the cat variant by ``color``.
Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
class Dog(BaseModel):
    pet_type: Literal['dog']
    name: str
# Outer discriminated union: picks Cat vs Dog by ``pet_type``.
Pet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
class Model(BaseModel):
    pet: Pet
    n: int
# Valid payload: matches the BlackCat branch.
m = Model(pet={'pet_type': 'cat', 'color': 'black', 'black_name': 'felix'}, n=1)
print(m)
try:
    # 'red' matches neither ``color`` literal -> discriminator error.
    Model(pet={'pet_type': 'cat', 'color': 'red'}, n='1')
except ValidationError as e:
    print(e)
try:
    # Correct discriminators but ``black_name`` is missing -> field error.
    Model(pet={'pet_type': 'cat', 'color': 'black'}, n='1')
except ValidationError as e:
    print(e)
| mit | 492dccc3c8f3281b0cd57c374bfb9b08 | 20.22 | 80 | 0.663525 | 3.129794 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/simphony/components/mzi.py | 1 | 2582 | from typing import Callable, Optional
from gdsfactory.simulation.simphony.components.mmi1x2 import mmi1x2
from gdsfactory.simulation.simphony.components.straight import (
straight as straight_function,
)
def mzi(
    delta_length: float = 10.0,
    length_y: float = 4.0,
    length_x: float = 0.1,
    splitter: Callable = mmi1x2,
    combiner: Optional[Callable] = None,
    straight_top: Callable = straight_function,
    straight_bot: Callable = straight_function,
    port_name_splitter_w0: str = "o1",
    port_name_splitter_e1: str = "o2",
    port_name_splitter_e0: str = "o3",
    port_name_combiner_w0: str = "o1",
    port_name_combiner_e1: str = "o2",
    port_name_combiner_e0: str = "o3",
):
    """Returns Mzi circuit model.

    Args:
        delta_length: bottom arm vertical extra length.
        length_y: vertical length for both and top arms.
        length_x: horizontal length.
        splitter: model function for the splitter.
        combiner: model function for the combiner. Defaults to ``splitter``.
        straight_top: straight model function for the short (top) arm.
        straight_bot: straight model function for the long (bottom) arm.
        port_name_splitter_w0: splitter west port name.
        port_name_splitter_e1: splitter east port connected to the short arm.
        port_name_splitter_e0: splitter east port connected to the long arm.
        port_name_combiner_w0: combiner west port name.
        port_name_combiner_e1: combiner east port connected to the short arm.
        port_name_combiner_e0: combiner east port connected to the long arm.

    .. code::

                   __Lx__
                  |      |
                  Ly     Lyr
                  |      |
        splitter==|      |==combiner
                  |      |
                  Ly     Lyr
                  |      |
                 DL/2   DL/2
                  |      |
                  |__Lx__|

    .. plot::
        :include-source:

        import gdsfactory as gf

        c = gf.components.mzi(delta_length=10)
        c.plot()

    .. plot::
        :include-source:

        import gdsfactory.simulation.simphony as gs
        import gdsfactory.simulation.simphony.components as gc

        c = gc.mzi()
        gs.plot_circuit(c)
    """
    combiner = combiner or splitter
    # Accept either model factories or already-instantiated models.
    splitter = splitter() if callable(splitter) else splitter
    combiner = combiner() if callable(combiner) else combiner
    wg_short = straight_top(length=2 * length_y + length_x)
    wg_long = straight_bot(length=2 * length_y + delta_length + length_x)
    # Bug fix: the splitter side must use the *splitter* port-name parameters.
    # Previously ``port_name_combiner_e0/e1`` were used here, so the
    # ``port_name_splitter_e0/e1`` arguments were silently ignored.
    splitter[port_name_splitter_e0].connect(wg_long["o1"])
    splitter[port_name_splitter_e1].connect(wg_short["o1"])
    combiner[port_name_combiner_e0].connect(wg_long["o2"])
    combiner[port_name_combiner_e1].connect(wg_short["o2"])
    # Expose the outer ports with canonical names.
    splitter[port_name_splitter_w0].rename("o1")
    combiner[port_name_combiner_w0].rename("o2")
    return splitter.circuit.to_subcircuit()
if __name__ == "__main__":
    # Visual smoke test: build the default MZI circuit and plot its response.
    import matplotlib.pyplot as plt
    from gdsfactory.simulation.simphony.plot_circuit import plot_circuit
    c = mzi()
    plot_circuit(c)
    plt.show()
| mit | deeb3e3028d567dc90094c37688eaab3 | 26.178947 | 73 | 0.590627 | 3.38401 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/gmeep/get_meep_geometry.py | 1 | 5618 | from typing import Dict, List, Optional, Union
import meep as mp
import numpy as np
import gdsfactory as gf
from gdsfactory.pdk import get_layer_stack
from gdsfactory.simulation.gmeep.get_material import get_material
from gdsfactory.types import ComponentSpec, CrossSectionSpec, LayerStack
def get_meep_geometry_from_component(
    component: ComponentSpec,
    layer_stack: Optional[LayerStack] = None,
    material_name_to_meep: Optional[Dict[str, Union[str, float]]] = None,
    wavelength: float = 1.55,
    is_3d: bool = False,
    dispersive: bool = False,
    **kwargs,
) -> List[mp.GeometricObject]:
    """Returns Meep geometry from a gdsfactory component.

    Args:
        component: gdsfactory component.
        layer_stack: for material layers. Defaults to the active PDK layer stack.
        material_name_to_meep: maps layer_stack name to meep material name.
        wavelength: in um.
        is_3d: renders in 3D.
        dispersive: add dispersion.
        kwargs: settings forwarded to gf.get_component.
    """
    component = gf.get_component(component=component, **kwargs)
    component_ref = component.ref()
    layer_stack = layer_stack or get_layer_stack()
    layer_to_thickness = layer_stack.get_layer_to_thickness()
    layer_to_material = layer_stack.get_layer_to_material()
    layer_to_zmin = layer_stack.get_layer_to_zmin()
    layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()
    geometry = []
    layer_to_polygons = component_ref.get_polygons(by_spec=True)
    for layer, polygons in layer_to_polygons.items():
        # Only layers with both a thickness and a material can be extruded.
        if layer in layer_to_thickness and layer in layer_to_material:
            # In 2D simulations the prism is infinitely tall and sits at z=0.
            height = layer_to_thickness[layer] if is_3d else mp.inf
            zmin_um = layer_to_zmin[layer] if is_3d else 0
            # center = mp.Vector3(0, 0, (zmin_um + height) / 2)
            for polygon in polygons:
                # Prism vertices lie on the layer's bottom plane; meep extrudes upward by ``height``.
                vertices = [mp.Vector3(p[0], p[1], zmin_um) for p in polygon]
                material_name = layer_to_material[layer]
                if material_name:
                    material = get_material(
                        name=material_name,
                        dispersive=dispersive,
                        material_name_to_meep=material_name_to_meep,
                        wavelength=wavelength,
                    )
                    geometry.append(
                        mp.Prism(
                            vertices=vertices,
                            height=height,
                            sidewall_angle=layer_to_sidewall_angle[layer],
                            material=material,
                            # center=center
                        )
                    )
    return geometry
def get_meep_geometry_from_cross_section(
    cross_section: CrossSectionSpec,
    extension_length: Optional[float] = None,
    layer_stack: Optional[LayerStack] = None,
    material_name_to_meep: Optional[Dict[str, Union[str, float]]] = None,
    wavelength: float = 1.55,
    dispersive: bool = False,
    **kwargs,
) -> List[mp.GeometricObject]:
    """Returns Meep geometry (one block/prism per section) for a gdsfactory cross-section.

    Args:
        cross_section: gdsfactory cross-section spec.
        extension_length: finite x-extent of the geometry; if None, blocks extend
            to infinity (prisms fall back to a 1 um span, see below).
        layer_stack: for material layers. Defaults to the active PDK layer stack.
        material_name_to_meep: maps layer_stack name to meep material name.
        wavelength: in um.
        dispersive: add dispersion.
        kwargs: settings forwarded to gf.get_cross_section.
    """
    x = gf.get_cross_section(cross_section=cross_section, **kwargs)
    # Treat the main waveguide layer as a Section alongside the extra sections.
    x_sections = [
        gf.Section(offset=x.offset, layer=x.layer, width=x.width),
        *x.sections,
    ]
    layer_stack = layer_stack or get_layer_stack()
    layer_to_thickness = layer_stack.get_layer_to_thickness()
    layer_to_material = layer_stack.get_layer_to_material()
    layer_to_zmin = layer_stack.get_layer_to_zmin()
    layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()
    geometry = []
    for section in x_sections:
        # NOTE(review): leftover debug print — consider switching to logging.
        print(f"section: {section}")
        layer = gf.get_layer(section.layer)
        if layer in layer_to_thickness and layer in layer_to_material:
            height = layer_to_thickness[layer]
            width = section.width
            offset = section.offset
            # Negative thickness means the layer is etched downwards; shift zmin accordingly.
            zmin_um = layer_to_zmin[layer] + (0 if height > 0 else -height)
            # center = mp.Vector3(0, 0, (zmin_um + height) / 2)
            material_name = layer_to_material[layer]
            material = get_material(
                name=material_name,
                dispersive=dispersive,
                material_name_to_meep=material_name_to_meep,
                wavelength=wavelength,
            )
            # Effective index at the simulation wavelength, for the log line below.
            index = material.epsilon(1 / wavelength)[0, 0] ** 0.5
            print(f"add {material_name!r} layer with index {index}")
            # Don't need to use prism unless using sidewall angles
            if layer in layer_to_sidewall_angle:
                # If using a prism, all dimensions need to be finite
                xspan = extension_length or 1
                p = mp.Prism(
                    vertices=[
                        mp.Vector3(x=-xspan / 2, y=-width / 2, z=zmin_um),
                        mp.Vector3(x=-xspan / 2, y=width / 2, z=zmin_um),
                        mp.Vector3(x=xspan / 2, y=width / 2, z=zmin_um),
                        mp.Vector3(x=xspan / 2, y=-width / 2, z=zmin_um),
                    ],
                    height=height,
                    center=mp.Vector3(y=offset, z=height / 2 + zmin_um),
                    sidewall_angle=np.deg2rad(layer_to_sidewall_angle[layer]),
                    material=material,
                )
                geometry.append(p)
            else:
                xspan = extension_length or mp.inf
                geometry.append(
                    mp.Block(
                        size=mp.Vector3(xspan, width, height),
                        material=material,
                        center=mp.Vector3(y=offset, z=height / 2 + zmin_um),
                    )
                )
    return geometry
| mit | 0fb78b86b38dc1089ffa40a250f02ca2 | 37.744828 | 78 | 0.55874 | 3.788267 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/litho_ruler.py | 1 | 1206 | from typing import Tuple
import gdsfactory as gf
from gdsfactory.types import LayerSpec
@gf.cell
def litho_ruler(
    height: float = 2,
    width: float = 0.5,
    spacing: float = 2.0,
    scale: Tuple[float, ...] = (3, 1, 1, 1, 1, 2, 1, 1, 1, 1),
    num_marks: int = 21,
    layer: LayerSpec = "WG",
) -> gf.Component:
    """Ruler structure for lithographic measurement.

    Includes marks of varying scales to allow for easy reading by eye.
    Based on phidl.geometry.

    Args:
        height: Height of the ruling marks in um.
        width: Width of the ruling marks in um.
        spacing: Center-to-center spacing of the ruling marks in um.
        scale: Height scale pattern of marks.
        num_marks: Total number of marks to generate.
        layer: Specific layer to put the ruler geometry on.

    Returns:
        Component with the ruler marks, bottom-aligned and evenly spaced in x.
    """
    D = gf.Component()
    for n in range(num_marks):
        # Mark heights cycle through ``scale`` (default: a tall mark every 5th/10th).
        h = height * scale[n % len(scale)]
        D << gf.components.rectangle(size=(width, h), layer=layer)
    # Lay the marks out left-to-right with fixed edge-to-edge pitch, then bottom-align.
    D.distribute(direction="x", spacing=spacing, separation=False, edge="x")
    D.align(alignment="ymin")
    D.flatten()
    return D
if __name__ == "__main__":
    # Quick visual check of the default ruler.
    c = litho_ruler()
    c.show(show_ports=True)
| mit | 9a8a11ce91efa0e2c3f260045509b48d | 27.046512 | 76 | 0.625207 | 3.38764 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/gmeep/meep_adjoint_optimization.py | 1 | 12258 | from types import LambdaType
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import nlopt
import numpy as np
from meep import Block, EigenModeSource, MaterialGrid, Simulation, Vector3, Volume
from meep.adjoint import DesignRegion, EigenmodeCoefficient, OptimizationProblem
from meep.visualization import get_2D_dimensions
from numpy import ndarray
import gdsfactory as gf
from gdsfactory import Component
from gdsfactory.simulation.gmeep import get_simulation
from gdsfactory.tech import LayerStack
from gdsfactory.types import Layer
def get_meep_adjoint_optimizer(
    component: Component,
    objective_function: Callable,
    design_regions: List[DesignRegion],
    design_variables: List[MaterialGrid],
    design_update: np.ndarray,
    TE_mode_number: int = 1,
    resolution: int = 30,
    cell_size: Optional[Tuple] = None,
    extend_ports_length: Optional[float] = 10.0,
    layer_stack: Optional[LayerStack] = None,
    zmargin_top: float = 3.0,
    zmargin_bot: float = 3.0,
    tpml: float = 1.5,
    clad_material: str = "SiO2",
    is_3d: bool = False,
    wavelength_start: float = 1.5,
    wavelength_stop: float = 1.6,
    wavelength_points: int = 50,
    dfcen: float = 0.2,
    port_source_name: str = "o1",
    port_margin: float = 3,
    distance_source_to_monitors: float = 0.2,
    port_source_offset: float = 0,
    port_monitor_offset: float = 0,
    dispersive: bool = False,
    material_name_to_meep: Optional[Dict[str, Union[str, float]]] = None,
    **settings,
):
    """Return a Meep `OptimizationProblem` object.

    Args:
        component: gdsfactory component.
        objective_function: functions must be composed of "field functions" that transform the recorded fields.
        design_regions: list of DesignRegion objects.
        design_variables: list of MaterialGrid objects.
        design_update: ndarray to initialize the optimization.
        TE_mode_number: TE mode number.
        resolution: in pixels/um (20: for coarse, 120: for fine).
        cell_size: tuple of Simulation object dimensions in um.
        extend_ports_length: to extend ports beyond the PML.
        layer_stack: contains layer to thickness, zmin and material.
            Defaults to active pdk.layer_stack.
        zmargin_top: thickness for cladding above core.
        zmargin_bot: thickness for cladding below core.
        tpml: PML thickness (um).
        clad_material: material for cladding.
        is_3d: if True runs in 3D.
        wavelength_start: wavelength min (um).
        wavelength_stop: wavelength max (um).
        wavelength_points: wavelength steps.
        dfcen: delta frequency.
        port_source_name: input port name.
        port_margin: margin on each side of the port.
        distance_source_to_monitors: in (um) source goes before.
        port_source_offset: offset between source GDS port and source MEEP port.
        port_monitor_offset: offset between monitor GDS port and monitor MEEP port.
        dispersive: use dispersive material models (requires higher resolution).
        material_name_to_meep: map layer_stack names with meep material database name
            or refractive index. dispersive materials have a wavelength dependent index.

    Keyword Args:
        settings: extra simulation settings (resolution, symmetries, etc.)

    Returns:
        opt: OptimizationProblem object
    """
    sim_dict = get_simulation(
        component,
        resolution=resolution,
        extend_ports_length=extend_ports_length,
        layer_stack=layer_stack,
        zmargin_top=zmargin_top,
        zmargin_bot=zmargin_bot,
        tpml=tpml,
        clad_material=clad_material,
        is_3d=is_3d,
        wavelength_start=wavelength_start,
        wavelength_stop=wavelength_stop,
        wavelength_points=wavelength_points,
        dfcen=dfcen,
        port_source_name=port_source_name,
        port_margin=port_margin,
        distance_source_to_monitors=distance_source_to_monitors,
        port_source_offset=port_source_offset,
        port_monitor_offset=port_monitor_offset,
        dispersive=dispersive,
        material_name_to_meep=material_name_to_meep,
        **settings,
    )
    sim = sim_dict["sim"]
    # Add each design region to the simulation geometry exactly once.
    # Bug fix: the original code appended an identical Block for every design
    # region a second time further down, duplicating the design geometry.
    for design_region, design_variable in zip(design_regions, design_variables):
        sim.geometry.append(
            Block(
                center=design_region.center,
                size=design_region.size,
                material=design_variable,
            )
        )
    cell_thickness = sim.cell_size[2]
    monitors = sim_dict["monitors"]
    # One eigenmode-coefficient objective quantity per port monitor.
    ob_list = [
        EigenmodeCoefficient(
            sim,
            Volume(
                center=monitor.regions[0].center,
                size=monitor.regions[0].size,
            ),
            TE_mode_number,
        )
        for monitor in monitors.values()
    ]
    c = component.copy()
    # Add a placeholder rectangle per design region so the copied component's
    # bounding box (used for the default cell size below) covers the design area.
    for design_region in design_regions:
        block = c << gf.components.rectangle(
            (design_region.size[0], design_region.size[1])
        )
        block.center = (design_region.center[0], design_region.center[1])
    sim.cell_size = (
        Vector3(*cell_size)
        if cell_size
        else Vector3(
            c.xsize + 2 * sim.boundary_layers[0].thickness,
            c.ysize + 2 * sim.boundary_layers[0].thickness,
            cell_thickness,
        )
    )
    # Replace the sources with a single forward-propagating eigenmode source
    # built from the first source created by get_simulation.
    source = [
        EigenModeSource(
            sim.sources[0].src,
            eig_band=1,
            direction=sim.sources[0].direction,
            eig_kpoint=Vector3(1, 0, 0),
            size=sim.sources[0].size,
            center=sim.sources[0].center,
        )
    ]
    sim.sources = source
    opt = OptimizationProblem(
        simulation=sim,
        objective_functions=[objective_function],
        objective_arguments=ob_list,
        design_regions=design_regions,
        frequencies=sim_dict["freqs"],
        decay_by=settings.get("decay_by", 1e-5),
    )
    # Seed the design variables and show the starting layout.
    opt.update_design([design_update])
    opt.plot2D(True)
    return opt
def run_meep_adjoint_optimizer(
    number_of_params: int,
    cost_function: LambdaType,
    update_variable: np.ndarray,
    maximize_cost_function: bool = True,
    algorithm: int = nlopt.LD_MMA,
    lower_bound: Any = 0,
    upper_bound: Any = 1,
    maxeval: int = 10,
    get_optimized_component: bool = False,
    opt: OptimizationProblem = None,
    **kwargs,
) -> Union[ndarray, Component]:
    """Run adjoint optimization using Meep.

    Args:
        number_of_params: number of parameters to optimize (usually resolution_in_x * resolution_in_y).
        cost_function: cost function to optimize.
        update_variable: variable to update the optimization with.
        maximize_cost_function: if True, maximize the cost function, else minimize it.
        algorithm: nlopt algorithm to use (default: nlopt.LD_MMA).
        lower_bound: lower bound for the optimization.
        upper_bound: upper bound for the optimization.
        maxeval: maximum number of evaluations.
        get_optimized_component: if True, returns the optimized gdsfactory Component.
            If this is True, the OptimizationProblem object used for the optimization must be passed as an argument.
        opt: OptimizationProblem object used for the optimization. Used only if get_optimized_component is True.

    Keyword Args:
        fcen: center frequency of the source.
        upscale_factor: upscale factor for the optimization's grid.
        threshold_offset_from_max: threshold offset from max eps value.
        layer: layer to apply to the optimized component.
    """
    solver = nlopt.opt(algorithm, number_of_params)
    solver.set_lower_bounds(lower_bound)
    solver.set_upper_bounds(upper_bound)
    if maximize_cost_function:
        solver.set_max_objective(cost_function)
    else:
        solver.set_min_objective(cost_function)
    solver.set_maxeval(maxeval)
    # Slice-assign so the caller's array object is updated in place.
    update_variable[:] = solver.optimize(update_variable)
    if get_optimized_component:
        fcen = kwargs.get("fcen", 1 / 1.55)
        upscale_factor = kwargs.get("upscale_factor", 2)
        threshold_offset_from_max = kwargs.get("threshold_offset_from_max", 0.01)
        layer = kwargs.get("layer", (1, 0))
        return get_component_from_sim(
            opt.sim, fcen, upscale_factor, threshold_offset_from_max, layer
        )
    return update_variable
def get_component_from_sim(
    sim: Simulation,
    fcen: float = 1 / 1.55,
    upscale_factor: int = 2,
    threshold_offset_from_max: float = 2.0,
    layer: Layer = (1, 0),
) -> Component:
    """Get gdsfactory Component from Meep Simulation object.

    Samples the simulation permittivity on an upscaled 2D grid and thresholds
    it into polygons on ``layer``.

    Args:
        sim: Meep Simulation object.
        fcen: center frequency of the source.
        upscale_factor: upscale factor for the optimization's grid.
        threshold_offset_from_max: threshold offset from max eps value.
        layer: layer to apply to the optimized component.

    Returns:
        gdsfactory Component.
    """
    grid_resolution = upscale_factor * sim.resolution
    sim_center, sim_size = get_2D_dimensions(sim, output_plane=None)
    # Bounding box of the 2D simulation plane.
    xmin = sim_center.x - sim_size.x / 2
    xmax = sim_center.x + sim_size.x / 2
    ymin = sim_center.y - sim_size.y / 2
    ymax = sim_center.y + sim_size.y / 2
    Nx = int((xmax - xmin) * grid_resolution + 1)
    Ny = int((ymax - ymin) * grid_resolution + 1)
    xtics = np.linspace(xmin, xmax, Nx)
    ytics = np.linspace(ymin, ymax, Ny)
    # Single z-slice at the simulation plane.
    ztics = np.array([sim_center.z])
    eps_data = np.real(sim.get_epsilon_grid(xtics, ytics, ztics, frequency=fcen))
    # Everything within ``threshold_offset_from_max`` of the max eps becomes geometry.
    return gf.read.from_np(
        eps_data,
        nm_per_pixel=1e3 / grid_resolution,
        layer=layer,
        threshold=np.max(eps_data) - threshold_offset_from_max,
    )
def _example_optim_geometry() -> Tuple[DesignRegion, MaterialGrid, Component, int, int]:
    """Dummy example of a component to optimize.

    Returns:
        design_region: DesignRegion covering the central optimization area.
        design_variables: MaterialGrid interpolating between SiO2 and Si.
        c: placeholder "mmi1x2" Component with three stub waveguides and ports.
        Nx: design grid size in x.
        Ny: design grid size in y.
    """
    from meep import Medium
    design_region_width = 5
    design_region_height = 4
    resolution = 20
    # Design grid is 5x finer than the simulation resolution.
    design_region_resolution = int(5 * resolution)
    Nx = int(design_region_resolution * design_region_width)
    Ny = int(design_region_resolution * design_region_height)
    pml_size = 1.0
    waveguide_length = 0.5
    Sx = 2 * pml_size + 2 * waveguide_length + design_region_width
    SiO2 = Medium(index=1.44)
    Si = Medium(index=3.4)
    design_variables = MaterialGrid(Vector3(Nx, Ny), SiO2, Si, grid_type="U_MEAN")
    design_region = DesignRegion(
        design_variables,
        volume=Volume(
            center=Vector3(),
            size=Vector3(design_region_width, design_region_height, 0),
        ),
    )
    c = Component("mmi1x2")
    arm_separation = 1.0
    # Input stub on the west side, two output stubs on the east side.
    straight1 = c << gf.components.straight(Sx / 2 + 1)
    straight1.move(straight1.ports["o2"], (-design_region_width / 2.0, 0))
    straight2 = c << gf.components.straight(Sx / 2 + 1)
    straight2.move(
        straight2.ports["o1"], (design_region_width / 2.0, (arm_separation + 1.0) / 2.0)
    )
    straight3 = c << gf.components.straight(Sx / 2 + 1)
    straight3.move(
        straight3.ports["o1"],
        (design_region_width / 2.0, (-arm_separation - 1.0) / 2.0),
    )
    c.add_port("o1", port=straight1.ports["o1"])
    c.add_port("o2", port=straight2.ports["o2"])
    c.add_port("o3", port=straight3.ports["o2"])
    return design_region, design_variables, c, Nx, Ny
if __name__ == "__main__":
    # Example: set up an adjoint optimization for the dummy 1x2 splitter.
    import autograd.numpy as npa
    eta_i = 0.5  # NOTE(review): currently unused in this script.
    design_region, design_variables, c, Nx, Ny = _example_optim_geometry()
    # Fixed seed for a reproducible random initial design.
    seed = 240
    np.random.seed(seed)
    x0 = np.random.rand(
        Nx * Ny,
    )
    def J(source, top, bottom):
        # Objective: mean transmitted power into both output arms,
        # normalized by the source amplitude (autograd-differentiable).
        power = npa.abs(top / source) ** 2 + npa.abs(bottom / source) ** 2
        return npa.mean(power)
    opt = get_meep_adjoint_optimizer(
        c,
        J,
        [design_region],
        [design_variables],
        x0,
        cell_size=(15, 8),
        extend_ports_length=0,
        port_margin=0.75,
        port_source_offset=-3.5,
        port_monitor_offset=-3.5,
    )
    opt.plot2D(True)
| mit | af8f0210282d2d92e2b2929e6f1bfcb4 | 32.955679 | 112 | 0.63738 | 3.576889 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/spiral_double.py | 1 | 2009 | import gdsfactory as gf
from gdsfactory.components import bend_circular
from gdsfactory.path import spiral_archimedean
@gf.cell
def spiral_double(
    min_bend_radius: float = 10.0,
    separation: float = 2.0,
    number_of_loops: float = 3,
    npoints: int = 1000,
    cross_section: gf.types.CrossSectionSpec = "strip",
    bend: gf.types.ComponentSpec = bend_circular,
) -> gf.Component:
    """Returns a spiral double (spiral in, and then out).

    Args:
        min_bend_radius: inner radius of the spiral.
        separation: separation between the loops.
        number_of_loops: number of loops per spiral.
        npoints: points for the spiral.
        cross_section: cross-section to extrude the structure with.
        bend: factory for the bends in the middle of the double spiral.
    """
    component = gf.Component()
    # Two back-to-back 180 deg bends at half the minimum radius form the
    # central S-shape joining the two spiral arms.
    bend = gf.get_component(
        bend, radius=min_bend_radius / 2, angle=180, cross_section=cross_section
    )
    bend1 = component.add_ref(bend).mirror()
    bend2 = component.add_ref(bend)
    bend2.connect("o2", bend1.ports["o1"])
    path = spiral_archimedean(
        min_bend_radius=min_bend_radius,
        separation=separation,
        number_of_loops=number_of_loops,
        npoints=npoints,
    )
    # Force horizontal port angles on the extruded spiral — presumably so the
    # arms attach cleanly to the central bends; confirm.
    path.start_angle = 0
    path.end_angle = 0
    spiral = path.extrude(cross_section=cross_section)
    # Same spiral placed twice, one copy per bend, giving the in+out double spiral.
    spiral1 = component.add_ref(spiral).connect("o1", bend1.ports["o2"])
    spiral2 = component.add_ref(spiral).connect("o1", bend2.ports["o1"])
    component.add_port("o1", port=spiral1.ports["o2"])
    component.add_port("o2", port=spiral2.ports["o2"])
    # Total length = 2 x (one spiral arm + one central bend).
    component.info["length"] = float(path.length() + bend.info["length"]) * 2
    return component
if __name__ == "__main__":
    # Quick visual check; print port orientations to verify they are horizontal.
    c = spiral_double(
        min_bend_radius=10,
        separation=2,
        number_of_loops=3,
        npoints=1000,
        cross_section="nitride",
    )
    print(c.ports["o1"].orientation)
    print(c.ports["o2"].orientation)
    c.show(show_ports=True)
| mit | 94c88dba227abc1b554ddc6eff5dea25 | 30.390625 | 80 | 0.645097 | 3.081288 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/lumerical/read.py | 1 | 4209 | import re
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.config import logger
from gdsfactory.simulation.get_sparameters_path import (
get_sparameters_path_lumerical as get_sparameters_path,
)
from gdsfactory.tech import LAYER_STACK, LayerStack
def get_ports(line: str) -> Tuple[str, str]:
    """Extract the two port labels from an INTERCONNECT record header line.

    Strips double quotes and opening parentheses, then reads fields 0 and 3
    of the comma-separated record.
    """
    cleaned = line.replace('"', "").replace("(", "")
    fields = cleaned.split(",")
    return fields[0], fields[3]
def read_sparameters_file(
    filepath, numports: int
) -> Tuple[Tuple[str, ...], np.array, np.ndarray]:
    r"""Returns Sparameters from Lumerical interconnect export file.

    Args:
        filepath: Sparameters filepath (interconnect format).
        numports: number of ports.

    Returns:
        port_names: list of port labels.
        F: frequency 1d np.array.
        S: Sparameters np.ndarray matrix.
    """
    F = []
    S = []
    port_names = []
    with open(filepath) as fid:
        # Header: one ["name", ...] line per port.
        for _i in range(numports):
            port_line = fid.readline()
            m = re.search(r'\[".*",', port_line)
            if m:
                port = m[0]
                # Strip the leading '["' and trailing '",'.
                port_names.append(port[2:-2])
        # First transmission record: port pair, then the (numrows, ...) shape line.
        line = fid.readline()
        port1, port2 = get_ports(line)
        line = fid.readline()
        numrows = int(tuple(line[1:-2].split(","))[0])
        S = np.zeros((numrows, numports, numports), dtype="complex128")
        # r: row (frequency sample) index; m, n: (column, row) counters over
        # the numports x numports blocks. NOTE: ``m`` shadows the regex match above.
        r = m = n = 0
        for line in fid:
            if line[0] == "(":
                # New record header: update the current port pair.
                if "transmission" in line:
                    port1, port2 = get_ports(line)
                continue
            data = line.split()
            data = list(map(float, data))
            # Frequencies are identical across blocks; collect them once.
            if m == 0 and n == 0:
                F.append(data[0])
            i = port_names.index(port1)
            j = port_names.index(port2)
            # Magnitude/phase -> complex S-parameter.
            S[r, i, j] = data[1] * np.exp(1j * data[2])
            r += 1
            if r == numrows:
                r = 0
                m += 1
                if m == numports:
                    m = 0
                    n += 1
                    if n == numports:
                        break
    # port_names.reverse()
    # print(len(F), S.shape, len(port_names))
    return tuple(port_names), np.array(F), S
def read_sparameters_lumerical(
    component: Optional[Component] = None,
    layer_stack: LayerStack = LAYER_STACK,
    filepath: Optional[str] = None,
    numports: Optional[int] = None,
    dirpath: Path = gf.PATH.sparameters,
    **kwargs,
) -> Tuple[Tuple[str, ...], np.array, np.ndarray]:
    r"""Returns Sparameters from Lumerical interconnect .DAT file.

    Args:
        component: Component.
        layer_stack: layer thickness and material.
        filepath: for file.
        numports: number of ports.
        dirpath: path where to look for the Sparameters.

    Keyword Args:
        simulation_settings.

    Returns:
        port_names: tuple of port labels.
        F: frequency 1d np.array.
        S: Sparameters np.ndarray matrix.

    The Sparameters file has Lumerical format
    https://support.lumerical.com/hc/en-us/articles/360036107914-Optical-N-Port-S-Parameter-SPAR-INTERCONNECT-Element#toc_5

    Raises:
        ValueError: if neither component nor filepath is given, if filepath is
            given without numports, if the file is missing, or if numports <= 1.
    """
    if component is None and filepath is None:
        raise ValueError("You need to define the filepath or the component")
    if filepath and numports is None:
        raise ValueError("You need to define numports")
    filepath = filepath or get_sparameters_path(
        component=component, dirpath=dirpath, layer_stack=layer_stack, **kwargs
    ).with_suffix(".dat")
    # Normalize so a plain string filepath also supports .exists() below.
    filepath = Path(filepath)
    numports = numports or len(component.ports)
    # Explicit exception instead of `assert`, which is stripped under `python -O`.
    if numports <= 1:
        raise ValueError(f"number of ports = {numports} and needs to be > 1")
    if not filepath.exists():
        # `component` may be None when an explicit filepath was given.
        name = component.name if component is not None else filepath.stem
        raise ValueError(f"Sparameters for {name!r} not found in {filepath}")
    logger.info(f"Sparameters loaded from {filepath}")
    return read_sparameters_file(filepath=filepath, numports=numports)
if __name__ == "__main__":
    # Smoke test: load the cached Sparameters for a default mmi1x2.
    r = read_sparameters_lumerical(gf.components.mmi1x2())
| mit | 02dab1c4f5e6de4e98843a6f32694def | 30.410448 | 123 | 0.593253 | 3.698594 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/gtidy3d/materials.py | 1 | 3129 | from functools import partial
from typing import Dict, Union
import tidy3d as td
from tidy3d.components.medium import PoleResidue
from tidy3d.components.types import ComplexNumber
from tidy3d.material_library import material_library
# Dispersive media from the tidy3d material library, keyed by lowercase name
# (get_medium lowercases lookups, so keys here must be lowercase).
MATERIAL_NAME_TO_MEDIUM = {
    "si": material_library["cSi"]["Li1993_293K"],
    "csi": material_library["cSi"]["Li1993_293K"],
    "sio2": material_library["SiO2"]["Horiba"],
    "sin": material_library["Si3N4"]["Luke2015"],
    "si3n4": material_library["Si3N4"]["Luke2015"],
}
# not dispersive materials have a constant index
# NOTE(review): the two maps below are not referenced elsewhere in this module —
# presumably kept for external consumers; confirm before removing.
MATERIAL_NAME_TO_TIDY3D_INDEX = {
    "si": 3.47,
    "sio2": 1.44,
    "sin": 2.0,
}
# dispersive materials
MATERIAL_NAME_TO_TIDY3D_NAME = {
    "si": "cSi",
    "sio2": "SiO2",
    "sin": "Si3N4",
}
def get_epsilon(
    name_or_index: Union[str, float],
    wavelength: float = 1.55,
    material_name_to_medium: Dict[str, PoleResidue] = MATERIAL_NAME_TO_MEDIUM,
) -> ComplexNumber:
    """Return the complex relative permittivity at ``wavelength`` (um).

    Args:
        name_or_index: material name or refractive index.
        wavelength: wavelength (um).
        material_name_to_medium: map name to medium.
    """
    medium = get_medium(
        name_or_index=name_or_index, material_name_to_medium=material_name_to_medium
    )
    # Media are evaluated in frequency; convert the wavelength via c0.
    return medium.eps_model(td.C_0 / wavelength)
def get_index(
    name_or_index: Union[str, float],
    wavelength: float = 1.55,
    material_name_to_medium: Dict[str, PoleResidue] = MATERIAL_NAME_TO_MEDIUM,
) -> float:
    """Return the real refractive index at ``wavelength`` (um).

    Args:
        name_or_index: material name or refractive index.
        wavelength: wavelength (um).
        material_name_to_medium: map name to medium.
    """
    permittivity = get_epsilon(
        name_or_index=name_or_index,
        wavelength=wavelength,
        material_name_to_medium=material_name_to_medium,
    )
    # Drop the extinction coefficient; only the real index is returned.
    refractive_index, _extinction = td.Medium.eps_complex_to_nk(permittivity)
    return refractive_index
def get_medium(
    name_or_index: Union[str, float],
    material_name_to_medium: Dict[str, PoleResidue] = MATERIAL_NAME_TO_MEDIUM,
) -> td.Medium:
    """Look up a tidy3d Medium by material name, or build one from an index.

    Args:
        name_or_index: material name (case-insensitive) or refractive index.
        material_name_to_medium: map name to medium.
    """
    key = name_or_index.lower() if isinstance(name_or_index, str) else name_or_index
    if isinstance(key, (int, float)):
        # A bare number is treated as a refractive index: eps = n**2.
        return td.Medium(permittivity=key**2)
    if key in material_name_to_medium:
        return material_name_to_medium[key]
    materials = list(material_name_to_medium.keys())
    raise ValueError(f"Material {key!r} not in {materials}")
# Convenience index lookups bound to common material names, e.g. si(1.55).
si = partial(get_index, "si")
sio2 = partial(get_index, "sio2")
sin = partial(get_index, "sin")
if __name__ == "__main__":
    # Print silicon's index at the C-band and O-band wavelengths.
    print(si(1.55))
    print(si(1.31))
    # print(get_index(name_or_index="cSi"))
    # print(get_index(name_or_index=3.4))
    # m = get_medium(name_or_index="SiO2")
    # m = td.Medium(permittivity=1.45 ** 2)
| mit | 2588e3b907f61668c7927a29204554b0 | 27.189189 | 84 | 0.646532 | 3.02027 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/tests/test_get_bundle_optical.py | 1 | 1672 | from pytest_regressions.data_regression import DataRegressionFixture
import gdsfactory as gf
from gdsfactory.component import Component
def test_get_bundle_optical(
    data_regression: DataRegressionFixture, check: bool = True
) -> Component:
    """Regression-test optical bundle routing between two fixed ports.

    Routes 2 ports from a straight array to an nxn block and checks the
    route lengths against the stored regression data.
    """
    lengths = {}
    c = gf.Component("test_get_bundle_optical")
    w = c << gf.components.straight_array(n=4, spacing=200)
    d = c << gf.components.nxn(west=4, east=0)
    d.y = w.y
    d.xmin = w.xmax + 200
    ports1 = [
        w.ports["o7"],
        w.ports["o8"],
    ]
    ports2 = [
        d.ports["o2"],
        d.ports["o1"],
    ]
    routes = gf.routing.get_bundle(ports1, ports2, sort_ports=True, radius=10)
    for i, route in enumerate(routes):
        c.add(route.references)
        lengths[i] = route.length
    if check:
        # Route lengths are the regression fingerprint.
        data_regression.check(lengths)
    return c
def test_get_bundle_optical2(
    data_regression: DataRegressionFixture, check: bool = True
) -> Component:
    """Regression-test bundle routing using all east/west facing ports."""
    lengths = {}
    c = gf.Component("test_get_bundle_optical2")
    w = c << gf.components.straight_array(n=4, spacing=200)
    d = c << gf.components.nxn(west=4, east=1)
    d.y = w.y
    d.xmin = w.xmax + 200
    # Select ports by orientation: 0 deg (east-facing) vs 180 deg (west-facing).
    ports1 = w.get_ports_list(orientation=0)
    ports2 = d.get_ports_list(orientation=180)
    routes = gf.routing.get_bundle(ports1, ports2, sort_ports=True)
    for i, route in enumerate(routes):
        c.add(route.references)
        lengths[i] = route.length
    if check:
        data_regression.check(lengths)
    return c
if __name__ == "__main__":
    # Visual check: build the test layout without regression checking.
    c = test_get_bundle_optical(None, check=False)
    # c = test_get_bundle_optical2(None, check=False)
    c.show(show_ports=True)
| mit | ba8d757d4bfdd99574be1ac7044b75ea | 23.588235 | 78 | 0.626196 | 3.154717 | false | true | false | false |
gdsfactory/gdsfactory | gdsfactory/geometry/functions.py | 1 | 6628 | from typing import Optional, Union
import numpy as np
from numpy import cos, float64, ndarray, sin
# Conversion factors between radians and degrees.
RAD2DEG = 180.0 / np.pi
DEG2RAD = 1 / RAD2DEG
def sign_shape(pts: ndarray) -> float64:
    """Return the winding sign of the closed polygon `pts`.

    Uses the shoelace formula: the sign of twice the signed area.
    Returns 0 for a degenerate (zero-area) polygon.
    """
    shifted = np.roll(pts, 1, axis=0)
    widths = shifted[:, 0] - pts[:, 0]
    heights = shifted[:, 1] + pts[:, 1]
    return np.sign(np.sum(widths * heights))
def area(pts: ndarray) -> float64:
    """Return the signed shoelace area of the closed polygon `pts`."""
    shifted = np.roll(pts, 1, axis=0)
    cross_terms = (shifted[:, 0] - pts[:, 0]) * (shifted[:, 1] + pts[:, 1])
    return cross_terms.sum() / 2
def manhattan_direction(p0, p1, tol=1e-5):
    """Return the axis-wise sign of the step from p0 to p1.

    Each component of the result is -1, 0 or +1; components smaller than
    `tol` in magnitude are treated as zero.
    """
    delta = p1 - p0

    def _axis_sign(value):
        # Collapse near-zero steps to 0, otherwise keep the sign.
        if abs(value) < tol:
            return 0
        return 1 if value > 0 else -1

    return np.array((_axis_sign(delta[0]), _axis_sign(delta[1])))
def remove_flat_angles(points: ndarray) -> ndarray:
    """Remove interior points that lie on a straight (flat-angle) segment.

    Accepts a list or an ndarray of points and returns the same container
    type with collinear interior points removed; endpoints are always kept.
    """
    a = angles_deg(np.vstack(points))
    da = a - np.roll(a, 1)
    da = np.mod(np.round(da, 3), 180)
    # To make sure we do not remove points at the edges
    da[0] = 1
    da[-1] = 1
    to_rm = list(np.where(np.abs(da[:-1]) < 1e-9)[0])
    if isinstance(points, list):
        # Pop from the end so earlier indices stay valid.
        while to_rm:
            i = to_rm.pop()
            points.pop(i)
    else:
        # NOTE(review): the array branch filters on `da != 0` while the list
        # branch uses a 1e-9 tolerance on da[:-1]; presumably equivalent after
        # the rounding above, but worth confirming they agree on edge cases.
        points = points[da != 0]
    return points
def remove_identicals(
    pts: ndarray, grids_per_unit: int = 1000, closed: bool = True
) -> ndarray:
    """Remove points that coincide with their successor on the grid.

    Two consecutive points are considered identical when every coordinate
    differs by less than half a grid unit (0.5 / grids_per_unit).

    Args:
        pts: (N, 2) array of points.
        grids_per_unit: grid resolution used for the coincidence test.
        closed: if True, also compare the last point against the first.

    Returns:
        The input array with coincident points deleted.
    """
    if len(pts) > 1:
        # Boolean mask: True where a point matches the next one (wrapping
        # around). np.all over axis 1 is the idiomatic form of the original
        # np.prod over a boolean array.
        identicals = np.all(
            np.abs(pts - np.roll(pts, -1, 0)) < 0.5 / grids_per_unit, axis=1
        )
        if not closed:
            # An open path never wraps, so the last point is always kept.
            identicals[-1] = False
        pts = np.delete(pts, identicals.nonzero()[0], 0)
    return pts
def centered_diff(a: ndarray) -> ndarray:
    """Central first difference (a[i+1] - a[i-1]) / 2 for interior points."""
    forward = np.roll(a, -1, axis=0)
    backward = np.roll(a, 1, axis=0)
    return ((forward - backward) / 2)[1:-1]
def centered_diff2(a: ndarray) -> ndarray:
    """Central second difference a[i+1] - 2*a[i] + a[i-1] for interior points."""
    forward = np.roll(a, -1, axis=0) - a
    backward = a - np.roll(a, 1, axis=0)
    return (forward - backward)[1:-1]
def curvature(points: ndarray, t: ndarray) -> ndarray:
    """Return the curvature at every interior point of a parametrized curve.

    Args:
        points: numpy.array shape (n, 2), curve samples P(t) = (x(t), y(t)).
        t: numpy.array of size n, parameter values at each sample.

    Returns:
        The curvature at each point, excluding the first and last point.

    For a planar curve parametrized as P(t) = (x(t), y(t)), the curvature is
    given by (x' y'' - x'' y') / (x'**2 + y'**2)**(3/2).
    """
    # Use centered difference for derivative
    dt = centered_diff(t)
    dp = centered_diff(points)
    dp2 = centered_diff2(points)
    # First derivatives with respect to t.
    dx = dp[:, 0] / dt
    dy = dp[:, 1] / dt
    # Second derivatives with respect to t.
    dx2 = dp2[:, 0] / dt**2
    dy2 = dp2[:, 1] / dt**2
    return (dx * dy2 - dx2 * dy) / (dx**2 + dy**2) ** (3 / 2)
def radius_of_curvature(points, t):
    """Return 1 / curvature at each interior point (infinite for straight segments)."""
    return 1 / curvature(points, t)
def path_length(points: ndarray) -> float64:
    """Return the total polyline length.

    Args:
        points: With shape (N, 2) representing N points with coordinates x, y.
    """
    segments = np.diff(points, axis=0)
    return np.sum(np.hypot(segments[:, 0], segments[:, 1]))
def snap_angle(a: float64) -> int:
    """Snap an angle (degrees) to the nearest manhattan angle.

    Returns 0, 90, 180 or 270. Angles exactly on a 45-degree boundary
    (45, 135, 225, 315 after mod 360) fall through to the default of 0,
    matching the historical behavior.
    """
    a = a % 360
    for low, high, snapped in (
        (-45, 45, 0),
        (45, 135, 90),
        (135, 225, 180),
        (225, 315, 270),
    ):
        if low < a < high:
            return snapped
    return 0
def angles_rad(pts: ndarray) -> ndarray:
    """Return the heading (radians) from each point to the next, wrapping around."""
    nxt = np.roll(pts, -1, 0)
    delta = nxt - pts
    return np.arctan2(delta[:, 1], delta[:, 0])
def angles_deg(pts: ndarray) -> ndarray:
    """Returns the angles (degrees) of the connection between each point and the next."""
    # Thin degree-conversion wrapper over angles_rad.
    return angles_rad(pts) * RAD2DEG
def extrude_path(
    points: ndarray,
    width: float,
    with_manhattan_facing_angles: bool = True,
    spike_length: Union[float64, int, float] = 0,
    start_angle: Optional[int] = None,
    end_angle: Optional[int] = None,
    grid: Optional[float] = None,
) -> ndarray:
    """Deprecated. Use gdsfactory.path.Path.extrude() instead.

    Extrude a path of `width` along a curve defined by `points`.

    Args:
        points: numpy 2D array of shape (N, 2).
        width: of the path to extrude.
        with_manhattan_facing_angles: snaps to manhattan angles.
        spike_length: in um.
        start_angle: in degrees.
        end_angle: in degrees.
        grid: in um.

    Returns:
        numpy 2D array of shape (2*N, 2).
    """
    from gdsfactory.pdk import get_grid_size

    grid = grid or get_grid_size()
    if isinstance(points, list):
        points = np.stack([(p[0], p[1]) for p in points], axis=0)
    a = angles_deg(points)
    # Facing angles at the two ends; optionally snapped to manhattan angles.
    if with_manhattan_facing_angles:
        _start_angle = snap_angle(a[0] + 180)
        _end_angle = snap_angle(a[-2])
    else:
        _start_angle = a[0] + 180
        _end_angle = a[-2]
    start_angle = start_angle if start_angle is not None else _start_angle
    end_angle = end_angle if end_angle is not None else _end_angle
    # Half-angle bisectors between consecutive segments; the end entries are
    # overridden so the extrusion caps match the requested facing angles.
    a2 = angles_rad(points) * 0.5
    a1 = np.roll(a2, 1)
    a2[-1] = end_angle * DEG2RAD - a2[-2]
    a1[0] = start_angle * DEG2RAD - a1[1]
    a_plus = a2 + a1
    cos_a_min = np.cos(a2 - a1)
    # Perpendicular offsets of half-width, widened at corners by 1/cos of the
    # half turn angle (miter join).
    offsets = np.column_stack((-sin(a_plus) / cos_a_min, cos(a_plus) / cos_a_min)) * (
        0.5 * width
    )
    points_back = np.flipud(points - offsets)
    if spike_length != 0:
        # Optional spikes extend the polygon beyond both ends.
        d = spike_length
        a_start = start_angle * DEG2RAD
        a_end = end_angle * DEG2RAD
        p_start_spike = points[0] + d * np.array([[cos(a_start), sin(a_start)]])
        p_end_spike = points[-1] + d * np.array([[cos(a_end), sin(a_end)]])
        pts = np.vstack((p_start_spike, points + offsets, p_end_spike, points_back))
    else:
        pts = np.vstack((points + offsets, points_back))
    # Snap the resulting polygon to the layout grid.
    pts = np.round(pts / grid) * grid
    return pts
def polygon_grow(polygon: ndarray, offset: float) -> ndarray:
    """Returns a grown closed shaped polygon by an offset."""
    # Clean up the outline first: drop duplicate points and collinear ones.
    s = remove_identicals(polygon)
    s = remove_flat_angles(s)
    # Close the polygon explicitly by repeating the first point.
    s = np.vstack([s, s[0]])
    if len(s) <= 1:
        return s
    # Make sure the shape is oriented in the correct direction for scaling
    ss = sign_shape(s)
    offset *= -ss
    # Half-angle bisectors between consecutive edges; wrap the end entries so
    # the closed polygon offsets consistently at the seam.
    a2 = angles_rad(s) * 0.5
    a1 = np.roll(a2, 1)
    a2[-1] = a2[0]
    a1[0] = a1[-1]
    a = a2 + a1
    c_minus = cos(a2 - a1)
    # Miter-join offsets: perpendicular displacement widened by 1/cos of the
    # half turn angle at each vertex.
    offsets = np.column_stack((-sin(a) / c_minus, cos(a) / c_minus)) * offset
    return s + offsets
| mit | 7b4349f8d3e9b2f2df117b858da76485 | 25.094488 | 89 | 0.560501 | 2.947088 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/sax/models.py | 1 | 7322 | import jax.numpy as jnp
from sax.typing_ import SDict
from sax.utils import reciprocal
nm = 1e-3  # lengths in this module are in um, so 1 nm = 1e-3 um
def straight(
    *,
    wl: float = 1.55,
    wl0: float = 1.55,
    neff: float = 2.34,
    ng: float = 3.4,
    length: float = 10.0,
    loss: float = 0.0,
) -> SDict:
    """Dispersive straight waveguide model.

    based on sax.models

    Args:
        wl: wavelength.
        wl0: center wavelength.
        neff: effective index.
        ng: group index.
        length: um.
        loss: in dB/um.

    .. code::

        o1 -------------- o2
                length
    """
    # First-order Taylor expansion of neff around wl0 using the group index.
    dwl = wl - wl0
    dneff_dwl = (ng - neff) / wl0
    neff -= dwl * dneff_dwl
    phase = 2 * jnp.pi * neff * length / wl
    # Convert dB/um propagation loss to an amplitude factor.
    amplitude = jnp.asarray(10 ** (-loss * length / 20), dtype=complex)
    transmission = amplitude * jnp.exp(1j * phase)
    return reciprocal(
        {
            ("o1", "o2"): transmission,
        }
    )
def bend(wl: float = 1.5, length: float = 20.0, loss: float = 0.0) -> SDict:
    """Returns bend Sparameters.

    Args:
        wl: wavelength (um).
        length: bend path length (um).
        loss: in dB/um.
    """
    amplitude = jnp.asarray(10 ** (-loss * length / 20), dtype=complex)
    # Scale the (lossless) straight response by the bend loss amplitude.
    return {k: amplitude * v for k, v in straight(wl=wl, length=length).items()}
def attenuator(*, loss: float = 0.0) -> SDict:
    """Attenuator model.

    based on sax.models

    Args:
        loss: in dB.

    .. code::

        o1 -------------- o2
                loss
    """
    # Total dB loss converted to an amplitude transmission factor.
    transmission = jnp.asarray(10 ** (-loss / 20), dtype=complex)
    return reciprocal(
        {
            ("o1", "o2"): transmission,
        }
    )
def phase_shifter(
    wl: float = 1.55,
    neff: float = 2.34,
    voltage: float = 0,
    length: float = 10,
    loss: float = 0.0,
) -> SDict:
    """Returns simple phase shifter model.

    Args:
        wl: wavelength in um.
        neff: effective index.
        voltage: voltage per PI phase shift.
        length: in um.
        loss: in dB.
    """
    # One volt adds pi radians of phase on top of the propagation phase.
    deltaphi = voltage * jnp.pi
    phase = 2 * jnp.pi * neff * length / wl + deltaphi
    amplitude = jnp.asarray(10 ** (-loss * length / 20), dtype=complex)
    transmission = amplitude * jnp.exp(1j * phase)
    return reciprocal(
        {
            ("o1", "o2"): transmission,
        }
    )
def grating_coupler(
    *,
    wl: float = 1.55,
    wl0: float = 1.55,
    loss: float = 0.0,
    reflection: float = 0.0,
    reflection_fiber: float = 0.0,
    bandwidth: float = 40 * nm,
) -> SDict:
    """Grating_coupler model.

    equation adapted from photontorch grating coupler
    https://github.com/flaport/photontorch/blob/master/photontorch/components/gratingcouplers.py

    Args:
        wl: wavelength (um).
        wl0: center wavelength.
        loss: in dB.
        reflection: from waveguide side.
        reflection_fiber: from fiber side.
        bandwidth: 3dB bandwidth (um).

    .. code::

                      fiber o2
                   /  /  /  /
                  /  /  /  /
                _|-|_|-|_|-|___
        o1  ______________|
    """
    amplitude = jnp.asarray(10 ** (-loss / 20), dtype=complex)
    # Convert the 3 dB (FWHM) bandwidth to the Gaussian standard deviation.
    sigma = bandwidth / (2 * jnp.sqrt(2 * jnp.log(2)))
    # Gaussian spectral response centered at wl0.
    transmission = amplitude * jnp.exp(-((wl - wl0) ** 2) / (2 * sigma**2))
    return reciprocal(
        {
            ("o1", "o1"): reflection * jnp.ones_like(transmission),
            ("o1", "o2"): transmission,
            ("o2", "o1"): transmission,
            ("o2", "o2"): reflection_fiber * jnp.ones_like(transmission),
        }
    )
def coupler(
    *,
    wl: float = 1.55,
    wl0: float = 1.55,
    length: float = 0.0,
    coupling0: float = 0.2,
    dk1: float = 1.2435,
    dk2: float = 5.3022,
    dn: float = 0.02,
    dn1: float = 0.1169,
    dn2: float = 0.4821,
) -> SDict:
    r"""Dispersive coupler model.

    equations adapted from photontorch.
    https://github.com/flaport/photontorch/blob/master/photontorch/components/directionalcouplers.py

    kappa = coupling0 + coupling

    Args:
        wl: wavelength (um).
        wl0: center wavelength (um).
        length: coupling length (um).
        coupling0: bend region coupling coefficient from FDTD simulations.
        dk1: first derivative of coupling0 vs wavelength.
        dk2: second derivative of coupling vs wavelength.
        dn: effective index difference between even and odd modes.
        dn1: first derivative of effective index difference vs wavelength.
        dn2: second derivative of effective index difference vs wavelength.

    .. code::

          coupling0/2        coupling        coupling0/2
        <-------------><--------------------><---------->
         o2 ________                           _______o3
                    \                         /
                     \        length         /
                      =======================
                     /                       \
            ________/                         \________
         o1                                            o4

                        ------------------------> K (coupled power)
                        /
                       / K
          -----------------------------------> T = 1 - K (transmitted power)
    """
    dwl = wl - wl0
    # Second-order Taylor expansions of the mode-index difference and the
    # lumped bend coupling around the center wavelength.
    dn = dn + dn1 * dwl + 0.5 * dn2 * dwl**2
    kappa0 = coupling0 + dk1 * dwl + 0.5 * dk2 * dwl**2
    kappa1 = jnp.pi * dn / wl
    # Through (tau) and cross (kappa) amplitudes of the coupled-mode solution.
    tau = jnp.cos(kappa0 + kappa1 * length)
    kappa = -jnp.sin(kappa0 + kappa1 * length)
    return reciprocal(
        {
            ("o1", "o4"): tau,
            ("o1", "o3"): 1j * kappa,
            ("o2", "o4"): 1j * kappa,
            ("o2", "o3"): tau,
        }
    )
def coupler_single_wavelength(*, coupling: float = 0.5) -> SDict:
    r"""Coupler model for a single wavelength.

    Based on sax.models.

    Args:
        coupling: power coupling coefficient.

    .. code::

        o2 ________                 ______o3
                   \               /
                    \    length   /
                     ==============
                    /              \
           ________/                \_______
        o1                                  o4
    """
    # Amplitude coefficients from the power coupling ratio.
    kappa = coupling**0.5
    tau = (1 - coupling) ** 0.5
    return reciprocal(
        {
            ("o1", "o4"): tau,
            ("o1", "o3"): 1j * kappa,
            ("o2", "o4"): 1j * kappa,
            ("o2", "o3"): tau,
        }
    )
def mmi1x2() -> SDict:
    """Returns an ideal 1x2 splitter."""
    # 3 dB power split: amplitude 1/sqrt(2) to each output.
    return reciprocal(
        {
            ("o1", "o2"): 0.5**0.5,
            ("o1", "o3"): 0.5**0.5,
        }
    )
def mmi2x2(*, coupling: float = 0.5) -> SDict:
    """Returns an ideal 2x2 splitter.

    Args:
        coupling: power coupling coefficient.
    """
    # Amplitude coefficients from the power coupling ratio; the cross path
    # carries the usual 90 degree (1j) phase.
    kappa = coupling**0.5
    tau = (1 - coupling) ** 0.5
    return reciprocal(
        {
            ("o1", "o4"): tau,
            ("o1", "o3"): 1j * kappa,
            ("o2", "o4"): 1j * kappa,
            ("o2", "o3"): tau,
        }
    )
# Default mapping from gdsfactory component names to their S-parameter models.
models = dict(
    straight=straight,
    bend_euler=bend,
    mmi1x2=mmi1x2,
    mmi2x2=mmi2x2,
    attenuator=attenuator,
    taper=straight,
    phase_shifter=phase_shifter,
    grating_coupler=grating_coupler,
    coupler=coupler,
)
if __name__ == "__main__":
    import gdsfactory.simulation.sax as gs

    # Visual smoke test: plot the grating coupler spectral response.
    gs.plot_model(grating_coupler)
    # gs.plot_model(coupler)
| mit | 8cd335c67c0f62dba46b7983a9e783c2 | 24.423611 | 100 | 0.463944 | 3.338805 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/mmi2x2.py | 1 | 3655 | import gdsfactory as gf
from gdsfactory.add_padding import get_padding_points
from gdsfactory.component import Component
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.taper import taper as taper_function
from gdsfactory.types import ComponentSpec, CrossSectionSpec, Optional
@gf.cell
def mmi2x2(
    width: Optional[float] = None,
    width_taper: float = 1.0,
    length_taper: float = 10.0,
    length_mmi: float = 5.5,
    width_mmi: float = 2.5,
    gap_mmi: float = 0.25,
    taper: ComponentSpec = taper_function,
    straight: ComponentSpec = straight_function,
    with_bbox: bool = True,
    cross_section: CrossSectionSpec = "strip",
) -> Component:
    r"""Mmi 2x2.

    Args:
        width: input and output straight width. Defaults to the
            cross_section width.
        width_taper: interface between input straights and mmi region.
        length_taper: into the mmi region.
        length_mmi: in x direction.
        width_mmi: in y direction.
        gap_mmi: (width_taper + gap between tapered wg)/2.
        taper: taper function.
        straight: straight function.
        with_bbox: box in bbox_layers and bbox_offsets to avoid DRC sharp edges.
        cross_section: spec.

    .. code::

                   length_mmi
                    <------>
                    ________
                   |        |
                __/          \__
            W1  __            __  E1
                  \          /_ _ _ _
                  |         | _ _ _ _| gap_mmi
                __/          \__
            W0  __            __  E0
                  \          /
                   |________|
                 <->
            length_taper
    """
    c = gf.Component()
    # Snap the gap to a 2 nm grid so opposite ports land on grid.
    gap_mmi = gf.snap.snap_to_grid(gap_mmi, nm=2)
    w_mmi = width_mmi
    w_taper = width_taper
    x = gf.get_cross_section(cross_section)
    width = width or x.width
    taper = gf.get_component(
        taper,
        length=length_taper,
        width1=width,
        width2=w_taper,
        cross_section=cross_section,
    )
    # Vertical offset of each taper axis from the MMI centerline.
    a = gap_mmi / 2 + width_taper / 2
    mmi = c << gf.get_component(
        straight, length=length_mmi, width=w_mmi, cross_section=cross_section
    )
    # Virtual ports at the four taper attachment points on the MMI body.
    ports = [
        gf.Port("o1", orientation=180, center=(0, -a), width=w_taper, cross_section=x),
        gf.Port("o2", orientation=180, center=(0, +a), width=w_taper, cross_section=x),
        gf.Port(
            "o3",
            orientation=0,
            center=(length_mmi, +a),
            width=w_taper,
            cross_section=x,
        ),
        gf.Port(
            "o4",
            orientation=0,
            center=(length_mmi, -a),
            width=w_taper,
            cross_section=x,
        ),
    ]
    for port in ports:
        taper_ref = c << taper
        taper_ref.connect(port="o2", destination=port)
        # The outer (wide) end of each taper becomes the component port.
        c.add_port(name=port.name, port=taper_ref.ports["o1"])
        c.absorb(taper_ref)
    if with_bbox:
        x = gf.get_cross_section(cross_section)
        padding = []
        for offset in x.bbox_offsets:
            points = get_padding_points(
                component=c,
                default=0,
                bottom=offset,
                top=offset,
            )
            padding.append(points)
        for layer, points in zip(x.bbox_layers, padding):
            c.add_polygon(points, layer=layer)
    c.absorb(mmi)
    if x.add_bbox:
        c = x.add_bbox(c)
    if x.add_pins:
        c = x.add_pins(c)
    return c
if __name__ == "__main__":
    # Visual smoke test.
    # c = mmi2x2(gap_mmi=0.252, cross_section="metal1")
    c = mmi2x2(gap_mmi=0.252)
    c.show(show_ports=True)
    c.pprint()
| mit | 596410cfc45ce535e9c2598135913d90 | 27.554688 | 87 | 0.517921 | 3.422285 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/simphony/plot_circuit.py | 1 | 2402 | from typing import Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
from simphony.models import Subcircuit
from simphony.simulators import SweepSimulator
def plot_circuit(
    circuit: Subcircuit,
    pin_in: str = "o1",
    pins_out: Tuple[str, ...] = ("o2",),
    start: float = 1500e-9,
    stop: float = 1600e-9,
    num: int = 2000,
    logscale: bool = True,
    fig: Optional[plt.Figure] = None,
    phase: bool = False,
) -> None:
    """Plot Sparameter circuit transmission over wavelength.

    Args:
        circuit: to plot.
        pin_in: input port name.
        pins_out: iterable of pins out to plot.
        start: wavelength (m).
        stop: wavelength (m).
        num: number of sampled points.
        logscale: plot in dB scale.
        fig: matplotlib figure.
        phase: plots phase instead of module.

    .. plot::
        :include-source:

        from gdsfactory.simulation.simphony.components.mzi import mzi
        import gdsfactory.simulation.simphony as gs

        c = mzi()
        gs.plot_circuit(c)
    """
    if not isinstance(pins_out, (set, list, tuple)):
        raise ValueError("pins out is not iterable")

    circuit = circuit() if callable(circuit) else circuit

    # NOTE(review): plt.subplot() returns an Axes (whose `.axes` property is
    # itself); passing an actual Figure here would make `fig.axes` a list —
    # confirm the intended type of the `fig` parameter.
    fig = fig or plt.subplot()
    ax = fig.axes

    simulation = SweepSimulator(start, stop, num)

    for p in pins_out:
        simulation.multiconnect(circuit.pins[pin_in], circuit.pins[p])
        wl, s = simulation.simulate()
        wl *= 1e9  # m -> nm for the x axis

        if phase:
            y = np.angle(s)
            ylabel = "angle (rad)"
        else:
            y = np.abs(s)
            y = 10 * np.log10(y) if logscale else y
            # Bug fix: dB label goes with logscale (labels were swapped).
            ylabel = "|S (dB)|" if logscale else "|S|"
        # Bug fix: label each trace with its own pin (was pins_out[0] for all).
        ax.plot(wl, y, label=p)
    ax.set_xlabel("wavelength (nm)")
    ax.set_ylabel(ylabel)
    if hasattr(circuit, "name"):
        ax.set_title(circuit.name)
    ax.legend()
    plt.show()
    return ax
def demo_single_port() -> None:
    """Demo: plot an MZI transmission on a linear scale."""
    import gdsfactory.simulation.simphony.components as gc

    c = gc.mzi()
    plot_circuit(c, logscale=False)
    plt.show()
if __name__ == "__main__":
    from gdsfactory.simulation.simphony.components.mzi import mzi

    # Visual smoke test: plot the MZI circuit transmission.
    # import gdsfactory.simulation.simphony.components as gc
    # c = gc.ring_double()
    # plot_circuit(c, pins_out=("cdrop", "drop", "output", "input"))
    c = mzi()
    plot_circuit(c)
    plt.show()
| mit | e29d4900819e1dd669095fff6242c188 | 24.553191 | 70 | 0.600333 | 3.481159 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/bend_circular.py | 1 | 2581 | import gdsfactory as gf
from gdsfactory.add_padding import get_padding_points
from gdsfactory.component import Component
from gdsfactory.path import arc
from gdsfactory.snap import snap_to_grid
from gdsfactory.types import CrossSectionSpec
@gf.cell
def bend_circular(
    angle: float = 90.0,
    npoints: int = 720,
    with_bbox: bool = True,
    cross_section: CrossSectionSpec = "strip",
    **kwargs
) -> Component:
    """Returns a radial arc.

    Args:
        angle: angle of arc (degrees).
        npoints: number of points.
        with_bbox: box in bbox_layers and bbox_offsets to avoid DRC sharp edges.
        cross_section: spec (CrossSection, string or dict).
        kwargs: cross_section settings.

    .. code::

              o2
              |
             /
            /
           /
       o1_____/
    """
    x = gf.get_cross_section(cross_section, **kwargs)
    # Bend radius comes from the cross_section.
    radius = x.radius

    p = arc(radius=radius, angle=angle, npoints=npoints)
    c = Component()
    path = p.extrude(x)
    ref = c << path
    c.add_ports(ref.ports)

    c.absorb(ref)
    c.info["length"] = float(snap_to_grid(p.length()))
    # dy: horizontal extent between arc endpoints.
    c.info["dy"] = snap_to_grid(float(abs(p.points[0][0] - p.points[-1][0])))
    c.info["radius"] = float(radius)

    if with_bbox:
        padding = []
        for offset in x.bbox_offsets:
            # Only a 180 degree bend needs top padding.
            top = offset if angle == 180 else 0
            points = get_padding_points(
                component=c,
                default=0,
                bottom=offset,
                right=offset,
                top=top,
            )
            padding.append(points)

        for layer, points in zip(x.bbox_layers, padding):
            c.add_polygon(points, layer=layer)
    return c
bend_circular180 = gf.partial(bend_circular, angle=180)  # 180 degree (U-turn) bend
if __name__ == "__main__":
    # Visual smoke test: build a wide rib bend and display it.
    c = bend_circular(
        width=2,
        layer=(0, 0),
        angle=90,
        cross_section="rib",
        with_bbox=True,
    )
    # c = bend_circular()
    # c = bend_circular(cross_section=gf.cross_section.pin, radius=5)
    # c.pprint_ports()
    print(c.ports["o2"].orientation)
    c.show(show_ports=True)
    # c = bend_circular180()
    # c.plot("qt")

    # from gdsfactory.quickplotter import quickplot2
    # c = bend_circular_trenches()
    # c = bend_circular_deep_rib()
    # print(c.ports)
    # print(c.length, np.pi * 10)
    # print(c.ports.keys())
    # print(c.ports['o2'].center)
    # print(c.settings)
    # c = bend_circular_slot()
    # c = bend_circular(width=0.45, radius=5)
    # c.plot()
    # quickplot2(c)
gdsfactory/gdsfactory | gdsfactory/add_tapers_cross_section.py | 1 | 2217 | from typing import Callable, Optional
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.taper_cross_section import taper_cross_section
from gdsfactory.cross_section import strip
from gdsfactory.port import select_ports_optical
from gdsfactory.types import ComponentSpec, CrossSectionSpec
@cell
def add_tapers(
    component: Component,
    taper: ComponentSpec = taper_cross_section,
    select_ports: Optional[Callable] = select_ports_optical,
    taper_port_name1: str = "o1",
    taper_port_name2: str = "o2",
    cross_section2: CrossSectionSpec = strip,
    **kwargs
) -> Component:
    """Returns new component with taper in all optical ports.

    Args:
        component: to add tapers.
        taper: taper spec.
        select_ports: function to select ports.
        taper_port_name1: name.
        taper_port_name2: name.
        cross_section2: end cross_section factory (cross_section).

    Keyword Args:
        cross_section1: start cross_section factory.
        length: transition length.
        npoints: number of points.
        linear: shape of the transition, sine when False.
        kwargs: cross_section settings for section2.
    """
    c = gf.Component()

    # Ports not selected are re-exposed unchanged.
    ports_to_taper = select_ports(component.ports) if select_ports else component.ports
    ports_to_taper_names = [p.name for p in ports_to_taper.values()]

    for port_name, port in component.ports.items():
        if port.name in ports_to_taper_names:
            # Each selected port gets a taper from its own cross_section
            # to cross_section2; the taper's far end becomes the new port.
            taper_ref = c << taper(
                cross_section1=port.cross_section,
                cross_section2=cross_section2,
                **kwargs
            )
            taper_ref.connect(taper_ref.ports[taper_port_name1].name, port)
            c.add_port(name=port_name, port=taper_ref.ports[taper_port_name2])
        else:
            c.add_port(name=port_name, port=port)
    c.add_ref(component)
    c.copy_child_info(component)
    return c
if __name__ == "__main__":
    # Visual smoke test: taper a rib straight to strip-rib tips.
    c0 = gf.components.straight(width=2, cross_section=gf.cross_section.rib)
    xs_rib_tip = gf.cross_section.strip_rib_tip
    c1 = add_tapers(c0, cross_section2=xs_rib_tip, linear=True)
    c1.show()
| mit | af1ac04b72c9f649cb5c1b4cf5ad2604 | 33.640625 | 87 | 0.665765 | 3.464063 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/labels/ehva.py | 1 | 3766 | from typing import Dict, List, Optional, Tuple
import flatdict
import pydantic
import gdsfactory as gf
from gdsfactory.name import clean_name
from gdsfactory.snap import snap_to_grid as snap
from gdsfactory.types import Layer
class Dft(pydantic.BaseModel):
    """Design-for-test geometry defaults (presumably in um — TODO confirm)."""

    pad_size: Tuple[int, int] = (100, 100)
    pad_pitch: int = 125
    pad_width: int = 100
    pad_gc_spacing_opposed: int = 500
    pad_gc_spacing_adjacent: int = 1000
# Default design-for-test settings instance.
DFT = Dft()

# Settings keys excluded from metadata labels by default.
ignore = (
    "cross_section",
    "decorator",
    "cross_section1",
    "cross_section2",
    "contact",
    "pad",
)

# Map gdsfactory port types to EHVA label port types.
port_types = {
    "vertical_te": "OPTICALPORT",
    "pad": "ELECTRICALPORT",
    "vertical_dc": "ELECTRICALPORT",
    "optical": "OPTICALPORT",
    "loopback": "OPTICALPORT",
}
@pydantic.validate_arguments
def add_label_ehva(
    component: gf.Component,
    die: str = "demo",
    port_types: Dict[str, str] = port_types,
    layer: Layer = (66, 0),
    metadata_ignore: Optional[List[str]] = None,
    metadata_include_parent: Optional[List[str]] = None,
    metadata_include_child: Optional[List[str]] = None,
) -> gf.Component:
    """Returns Component with measurement labels.

    Args:
        component: to add labels to.
        die: string.
        port_types: list of port types to label.
        layer: text label layer.
        metadata_ignore: list of settings keys to ignore.
            Works with flatdict setting:subsetting.
        metadata_include_parent: includes parent metadata.
            Works with flatdict setting:subsetting.
        metadata_include_child: includes child metadata.
            Works with flatdict setting:subsetting.
    """
    metadata_ignore = metadata_ignore or []
    metadata_include_parent = metadata_include_parent or []
    metadata_include_child = metadata_include_child or []

    text = f"""DIE NAME:{die}
CIRCUIT NAME:{component.name}
"""
    info = []

    # Changed (non-default) child settings, filtered by metadata_ignore.
    metadata = component.metadata_child.changed
    if metadata:
        info += [
            f"CIRCUITINFO NAME: {k}, VALUE: {v}"
            for k, v in metadata.items()
            if k not in metadata_ignore and isinstance(v, (int, float, str))
        ]

    # Explicitly requested parent metadata (flattened "setting:subsetting" keys).
    metadata = flatdict.FlatDict(component.metadata.full)
    info += [
        f"CIRCUITINFO NAME: {clean_name(k)}, VALUE: {metadata.get(k)}"
        for k in metadata_include_parent
        if metadata.get(k)
    ]

    # Explicitly requested child metadata.
    metadata = flatdict.FlatDict(component.metadata_child.full)
    info += [
        f"CIRCUITINFO NAME: {k}, VALUE: {metadata.get(k)}"
        for k in metadata_include_child
        if metadata.get(k)
    ]

    text += "\n".join(info)
    text += "\n"

    # One label line per port of each recognized port type.
    info = []
    if component.ports:
        for port_type_gdsfactory, port_type_ehva in port_types.items():
            info += [
                f"{port_type_ehva} NAME: {port.name} TYPE: {port_type_gdsfactory}, "
                f"POSITION RELATIVE:({snap(port.x)}, {snap(port.y)}),"
                f" ORIENTATION: {port.orientation}"
                for port in component.get_ports_list(port_type=port_type_gdsfactory)
            ]
    text += "\n".join(info)

    # The component is locked after creation; unlock to attach the label.
    component.unlock()
    label = gf.Label(
        text=text,
        origin=(0, 0),
        anchor="o",
        layer=layer[0],
        texttype=layer[1],
    )
    component.add(label)
    component.lock()
    return component
if __name__ == "__main__":
    # Visual smoke test: label an MMI with a fiber array and show it.
    c = gf.c.straight(length=11)
    c = gf.c.mmi2x2(length_mmi=2.2)
    c = gf.routing.add_fiber_array(
        c, get_input_labels_function=None, grating_coupler=gf.c.grating_coupler_te
    )
    add_label_ehva(
        c,
        die="demo_die",
        metadata_include_parent=["grating_coupler:settings:polarization"],
    )
    # add_label_ehva(c, die="demo_die", metadata_include_child=["width_mmi"])
    # add_label_ehva(c, die="demo_die", metadata_include_child=[])

    print(c.labels)
    c.show(show_ports=True)
| mit | a892d3c519408b484185df03a1f31296 | 26.489051 | 84 | 0.607276 | 3.395852 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/routing/route_sharp.py | 1 | 14787 | """based on phidl.routing."""
from typing import Optional, Tuple
import numpy as np
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.cross_section import CrossSection
from gdsfactory.path import Path, transition
from gdsfactory.port import Port
from gdsfactory.routing.route_quad import _get_rotated_basis
from gdsfactory.types import CrossSectionSpec, LayerSpec
def path_straight(port1: Port, port2: Port) -> Path:
    """Return waypoint path between port1 and port2 in a straight line.

    Useful when ports point directly at each other.

    Args:
        port1: start port.
        port2: end port.
    """
    delta_orientation = np.round(
        np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3
    )
    e1, e2 = _get_rotated_basis(port1.orientation)
    displacement = port2.center - port1.center
    xrel = np.round(
        np.dot(displacement, e1), 3
    )  # relative position of port 2, forward/backward
    yrel = np.round(
        np.dot(displacement, e2), 3
    )  # relative position of port 2, left/right
    # port2 must be ahead of port1 (xrel > 0), on its axis (yrel == 0), and
    # parallel or antiparallel in orientation.
    if (delta_orientation not in (0, 180, 360)) or (yrel != 0) or (xrel <= 0):
        raise ValueError("path_straight(): ports must point directly at each other.")
    return Path(np.array([port1.center, port2.center]))
def path_L(port1: Port, port2: Port) -> Path:
    """Return waypoint path between port1 and port2 in an L shape.

    Useful when orthogonal ports can be directly connected with one turn.

    Args:
        port1: start port.
        port2: end port.
    """
    delta_orientation = np.round(
        np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3
    )
    if delta_orientation not in (90, 270):
        raise ValueError("path_L(): ports must be orthogonal.")
    e1, e2 = _get_rotated_basis(port1.orientation)
    # assemble waypoints
    pt1 = port1.center
    pt3 = port2.center
    delta_vec = pt3 - pt1
    # Corner point: travel along port1's axis to port2's perpendicular line.
    pt2 = pt1 + np.dot(delta_vec, e1) * e1
    return Path(np.array([pt1, pt2, pt3]))
def path_U(port1: Port, port2: Port, length1=200) -> Path:
    """Return waypoint path between port1 and port2 in a U shape. Useful when ports face the same direction or toward each other.

    Args:
        port1: start port.
        port2: end port.
        length1: Length of segment exiting port1.
            Should be larger than bend radius.
    """
    delta_orientation = np.round(
        np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3
    )
    if delta_orientation not in (0, 180, 360):
        raise ValueError("path_U(): ports must be parallel.")

    # Consistency: use the shared helper every sibling path function uses
    # instead of recomputing the rotated basis by hand.
    # e1 = forward direction of port1, e2 = its left-hand perpendicular.
    e1, e2 = _get_rotated_basis(port1.orientation)

    # assemble waypoints
    pt1 = port1.center
    pt4 = port2.center
    pt2 = pt1 + length1 * e1  # outward by length1 distance
    delta_vec = pt4 - pt2
    # Sideways leg: project the remaining displacement onto e2.
    pt3 = pt2 + np.dot(delta_vec, e2) * e2
    return Path(np.array([pt1, pt2, pt3, pt4]))
def path_J(port1: Port, port2: Port, length1=200, length2=200) -> Path:
    """Return waypoint path between port1 and port2 in a J shape. Useful when \
    orthogonal ports cannot be connected directly with an L shape.

    Args:
        port1: start port.
        port2: end port.
        length1: Length of segment exiting port1.
            Should be larger than bend radius.
        length2: Length of segment exiting port2.
            Should be larger than bend radius.
    """
    delta_orientation = np.round(
        np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3
    )
    if delta_orientation not in (90, 270):
        raise ValueError("path_J(): ports must be orthogonal.")
    e1, _ = _get_rotated_basis(port1.orientation)
    e2, _ = _get_rotated_basis(port2.orientation)
    # assemble waypoints
    pt1 = port1.center
    pt2 = pt1 + length1 * e1  # outward from port1 by length1
    pt5 = port2.center
    pt4 = pt5 + length2 * e2  # outward from port2 by length2
    delta_vec = pt4 - pt2
    pt3 = pt2 + np.dot(delta_vec, e2) * e2  # move orthogonally in e2 direction
    return Path(np.array([pt1, pt2, pt3, pt4, pt5]))
def path_C(port1: Port, port2: Port, length1=100, left1=100, length2=100) -> Path:
    """Return waypoint path between port1 and port2 in a C shape. Useful when ports are parallel and face away from each other.

    Args:
        port1: start port.
        port2: end port.
        length1: Length of route segment coming out of port1. Should be at larger
            than bend radius.
        left1: Length of route segment that turns left (or right if negative)
            from port1. Should be larger than twice the bend radius.
        length2: Length of route segment coming out of port2. Should be larger
            than bend radius.
    """
    delta_orientation = np.round(
        np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3
    )
    if delta_orientation not in (0, 180, 360):
        raise ValueError("path_C(): ports must be parallel.")
    e1, e_left = _get_rotated_basis(port1.orientation)
    e2, _ = _get_rotated_basis(port2.orientation)
    # assemble route points
    pt1 = port1.center
    pt2 = pt1 + length1 * e1  # outward from port1 by length1
    pt3 = pt2 + left1 * e_left  # leftward by left1
    pt6 = port2.center
    pt5 = pt6 + length2 * e2  # outward from port2 by length2
    delta_vec = pt5 - pt3
    pt4 = pt3 + np.dot(delta_vec, e1) * e1  # move orthogonally in e1 direction
    return Path(np.array([pt1, pt2, pt3, pt4, pt5, pt6]))
def path_manhattan(port1: Port, port2: Port, radius) -> Path:
    """Return waypoint path between port1 and port2 using manhattan routing. Routing is performed using straight, L, U, J, or C waypoint path as needed. Ports must face orthogonal or parallel directions.

    Args:
        port1: start port.
        port2: end port.
        radius: Bend radius for 90 degree bend.
    """
    radius = radius + 0.1  # ensure space for bend radius
    e1, e2 = _get_rotated_basis(port1.orientation)
    displacement = port2.center - port1.center
    xrel = np.round(
        np.dot(displacement, e1), 3
    )  # port2 position, forward(+)/backward(-) from port 1
    yrel = np.round(
        np.dot(displacement, e2), 3
    )  # port2 position, left(+)/right(-) from port1
    orel = np.round(
        np.abs(np.mod(port2.orientation - port1.orientation, 360)), 3
    )  # relative orientation
    if orel not in (0, 90, 180, 270, 360):
        raise ValueError(
            "path_manhattan(): ports must face parallel or orthogonal directions."
        )
    if orel in (90, 270):
        # Orthogonal case
        if (
            (orel == 90 and yrel < -1 * radius) or (orel == 270 and yrel > radius)
        ) and xrel > radius:
            # Enough room for a single turn.
            pts = path_L(port1, port2)
        else:
            # Adjust length1 and length2 to ensure intermediate segments fit bend radius
            direction = -1 if (orel == 270) else 1
            length2 = (
                2 * radius - direction * yrel
                if (np.abs(radius + direction * yrel) < 2 * radius)
                else radius
            )
            length1 = (
                2 * radius + xrel if (np.abs(radius - xrel) < 2 * radius) else radius
            )
            pts = path_J(port1, port2, length1=length1, length2=length2)
    elif orel == 180 and yrel == 0 and xrel > 0:
        # Ports face each other on the same axis: direct connection.
        pts = path_straight(port1, port2)
    elif (orel == 180 and xrel <= 2 * radius) or (np.abs(yrel) < 2 * radius):
        # Adjust length1 and left1 to ensure intermediate segments fit bend radius
        left1 = np.abs(yrel) + 2 * radius if (np.abs(yrel) < 4 * radius) else 2 * radius
        y_direction = -1 if (yrel < 0) else 1
        left1 = y_direction * left1
        length2 = radius
        x_direction = -1 if (orel == 180) else 1
        segmentx_length = np.abs(xrel + x_direction * length2 - radius)
        length1 = (
            xrel + x_direction * length2 + 2 * radius
            if segmentx_length < 2 * radius
            else radius
        )
        pts = path_C(port1, port2, length1=length1, length2=length2, left1=left1)
    else:
        # Adjust length1 to ensure segment comes out of port2
        length1 = radius + xrel if (orel == 0 and xrel > 0) else radius
        pts = path_U(port1, port2, length1=length1)
    return pts
def path_Z(port1: Port, port2: Port, length1=100, length2=100) -> Path:
    """Return waypoint path between port1 and port2 in a Z shape. Ports can \
have any relative orientation.

    Args:
        port1: start port.
        port2: end port.
        length1: Length of route segment coming out of port1.
        length2: Length of route segment coming out of port2.
    """
    # Unit vectors pointing out of each port.
    direction1, _ = _get_rotated_basis(port1.orientation)
    direction2, _ = _get_rotated_basis(port2.orientation)
    # Walk out of each port by the requested length; the middle segment of the
    # "Z" joins the two escape points.
    start = port1.center
    end = port2.center
    escape_start = start + length1 * direction1
    escape_end = end + length2 * direction2
    return Path(np.array([start, escape_start, escape_end, end]))
def path_V(port1: Port, port2: Port) -> Path:
    """Return waypoint path between port1 and port2 in a V shape. Useful when \
ports point to a single connecting point.

    The two port directions must intersect: if they are parallel the 2x2
    system below is singular and numpy raises ``numpy.linalg.LinAlgError``.

    Args:
        port1: start port.
        port2: end port.
    """
    # get basis vectors in port directions
    e1, _ = _get_rotated_basis(port1.orientation)
    e2, _ = _get_rotated_basis(port2.orientation)
    # assemble route points
    pt1 = port1.center
    pt3 = port2.center
    # Solve pt1 + t1*e1 == pt3 + t2*e2 for (t1, t2). np.linalg.solve is more
    # accurate and cheaper than explicitly inverting the 2x2 matrix.
    E = np.column_stack((e1, -1 * e2))
    t1 = np.linalg.solve(E, pt3 - pt1)[0]
    pt2 = t1 * e1 + pt1
    return Path(np.array([pt1, pt2, pt3]))
@gf.cell
def route_sharp(
    port1: Port,
    port2: Port,
    width: Optional[float] = None,
    path_type: str = "manhattan",
    manual_path=None,
    layer: Optional[LayerSpec] = None,
    cross_section: Optional[CrossSectionSpec] = None,
    port_names: Tuple[str, str] = ("o1", "o2"),
    **kwargs
) -> Component:
    """Returns Component route between ports.

    Args:
        port1: start port.
        port2: end port.
        width: None, int, float, array-like[2], or CrossSection
            If None, the route linearly tapers between the widths the ports
            If set to a single number (e.g. `width=1.7`): makes a fixed-width route
            If set to a 2-element array (e.g. `width=[1.8,2.5]`): makes a route
            whose width varies linearly from width[0] to width[1]
            If set to a CrossSection: uses the CrossSection parameters for the route
        path_type : {'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}
            Method of waypoint path creation. Should be one of
            - 'manhattan' - automatic manhattan routing
                    (see path_manhattan() ).
            - 'L' - L-shaped path for orthogonal ports that can be directly
                    connected (see path_L() ).
            - 'U' - U-shaped path for parallel or facing ports
                    (see path_U() ).
            - 'J' - J-shaped path for orthogonal ports that cannot be
                    directly connected (see path_J() ).
            - 'C' - C-shaped path for ports that face away from each
                    other (see path_C() ).
            - 'Z' - Z-shaped path with three segments for ports at any
                    angles (see path_Z() ).
            - 'V' - V-shaped path with two segments for ports at any
                    angles (see path_V() ).
            - 'straight' - straight path for ports that face each other
                    see path_straight() ).
            - 'manual' - use an explicit waypoint path provided
                    in manual_path.
        manual_path : array-like[N][2] or Path
            Waypoint path for creating a manual route
        layer: Layer to put route on.
        kwargs: Keyword arguments passed to the waypoint path function.

    .. plot::
        :include-source:

        import gdsfactory as gf

        c = gf.Component("pads")
        c1 = c << gf.components.pad(port_orientation=None)
        c2 = c << gf.components.pad(port_orientation=None)
        c2.movex(400)
        c2.movey(-200)
        route = c << gf.routing.route_sharp(c1.ports["e4"], c2.ports["e1"], path_type="L")
        c.plot()
    """
    # Step 1: build the waypoint Path with the requested strategy.  Each
    # path_* helper only computes waypoints; extrusion happens below.
    if path_type == "C":
        P = path_C(port1, port2, **kwargs)
    elif path_type == "J":
        P = path_J(port1, port2, **kwargs)
    elif path_type == "L":
        P = path_L(port1, port2)
    elif path_type == "U":
        P = path_U(port1, port2, **kwargs)
    elif path_type == "V":
        P = path_V(port1, port2)
    elif path_type == "Z":
        P = path_Z(port1, port2, **kwargs)
    elif path_type == "manhattan":
        # Bend radius for manhattan routing scales with the wider port.
        radius = max(port1.width, port2.width)
        P = path_manhattan(port1, port2, radius=radius)
    elif path_type == "manual":
        P = manual_path if isinstance(manual_path, Path) else Path(manual_path)
    elif path_type == "straight":
        P = path_straight(port1, port2)
    else:
        raise ValueError(
            """route_sharp() received an invalid path_type.  Must be one of
        {'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}"""
        )

    # Step 2: extrude the waypoint Path into a Component.
    if cross_section:
        # Explicit cross_section wins over width/layer.
        cross_section = gf.get_cross_section(cross_section)
        D = P.extrude(cross_section=cross_section)
    elif width is None:
        # No width given: linearly taper between the two port widths.
        layer = layer or port1.layer
        X1 = CrossSection(
            width=port1.width, port_names=port_names, layer=layer, name="x1"
        )
        X2 = CrossSection(
            width=port2.width, port_names=port_names, layer=layer, name="x2"
        )
        cross_section = transition(
            cross_section1=X1, cross_section2=X2, width_type="linear"
        )
        D = P.extrude(cross_section=cross_section)
    else:
        # Fixed width (scalar) or linear taper (2-element array-like).
        D = P.extrude(width=width, layer=layer)

    # Step 3: expose ports at both ends.  Ports are rotated 180 deg so they
    # face outwards from the route.  NOTE(review): ports are named 1 and 2
    # (integers), not the usual "o1"/"o2" strings -- confirm callers expect this.
    if not isinstance(width, CrossSection):
        newport1 = D.add_port(port=port1, name=1).rotate(180)
        newport2 = D.add_port(port=port2, name=2).rotate(180)
        if np.size(width) == 1:
            newport1.width = width
            newport2.width = width
        if np.size(width) == 2:
            newport1.width = width[0]
            newport2.width = width[1]
    return D
if __name__ == "__main__":
    # Demo: L-shaped metal route between two pads.
    c = gf.Component("pads")
    c1 = c << gf.components.pad(port_orientation=None)
    c2 = c << gf.components.pad(port_orientation=None)
    c2.movex(400)
    c2.movey(-200)
    route = c << route_sharp(c1.ports["e4"], c2.ports["e1"], path_type="L")
    c.show(show_ports=True)
| mit | 7dcb33702c5ea71d0efae3f689f5af12 | 36.625954 | 204 | 0.598837 | 3.418169 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/simphony/components/mzi_siepic.py | 1 | 1723 | from simphony.libraries import siepic
from gdsfactory.simulation.simphony.components.mmi1x2 import mmi1x2
def mzi(L0=1, DL=100.0, L2=10.0, y_model_factory=mmi1x2, wg=siepic.Waveguide):
    """Return a simphony circuit model of a Mach-Zehnder interferometer.

    The MZI is a splitter and a recombiner joined by two waveguide arms of
    length ``2*L0 + L2`` (short arm) and ``2*L0 + 2*DL + L2`` (long arm).

    Args:
        L0 (um): vertical length shared by both arms.
        DL (um): extra length on the long arm (delta_length = 2*DL).
        L2 (um): horizontal length of the top segment.
        y_model_factory: splitter/recombiner model, or a callable returning one.
        wg: waveguide model class; instantiated with ``length`` in meters.

    .. code::

                   __L2__
                  |      |
                  L0     L0r
                  |      |
         splitter==|      |==recombiner
                  |      |
                  L0     L0r
                  |      |
                  DL     DL
                  |      |
                  |__L2__|
    """
    # Accept either an instantiated model or a factory callable.
    splitter = y_model_factory() if callable(y_model_factory) else y_model_factory
    combiner = y_model_factory() if callable(y_model_factory) else y_model_factory

    # Waveguide lengths are specified in meters (um -> m conversion).
    long_arm = wg(length=(2 * L0 + 2 * DL + L2) * 1e-6)
    short_arm = wg(length=(2 * L0 + L2) * 1e-6)

    combiner.pins[0].rename("o2")
    splitter[1].connect(long_arm)
    splitter[2].connect(short_arm)
    combiner.multiconnect(None, long_arm, short_arm)
    return splitter.circuit.to_subcircuit("mzi")
if __name__ == "__main__":
    # Demo: plot the transmission spectrum of the default MZI circuit.
    import matplotlib.pyplot as plt

    from gdsfactory.simulation.simphony import plot_circuit

    c = mzi()
    plot_circuit(c)
    plt.show()
| mit | 5f1167bedfe61ae86d0d3b148d6e0629 | 23.267606 | 86 | 0.548462 | 3.076786 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/via_stack.py | 1 | 3301 | from typing import Optional, Tuple
from numpy import floor
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.components.via import via1, via2, viac
from gdsfactory.tech import LAYER
from gdsfactory.types import ComponentSpec, LayerSpec, LayerSpecs
@gf.cell
def via_stack(
    size: Tuple[float, float] = (11.0, 11.0),
    layers: LayerSpecs = ("M1", "M2", "M3"),
    vias: Optional[Tuple[Optional[ComponentSpec], ...]] = (via1, via2),
    layer_port: Optional[LayerSpec] = None,
) -> Component:
    """Rectangular via array stack.

    You can use it to connect different metal layers or metals to silicon.
    You can use the naming convention via_stack_layerSource_layerDestination

    contains 4 ports (e1, e2, e3, e4)

    also know as Via array
    http://www.vlsi-expert.com/2017/12/vias.html

    spacing = via.info['spacing']
    enclosure = via.info['enclosure']

    Args:
        size: of the layers.
        layers: layers on which to draw rectangles.
        vias: vias to use to fill the rectangles.
        layer_port: if None assumes port is on the last layer.
    """
    width, height = size
    # Half-dimensions used to center the via arrays on the origin.
    a = width / 2
    b = height / 2
    layers = layers or []

    # Default the port layer to the topmost layer of the stack.
    if layers:
        layer_port = layer_port or layers[-1]

    c = Component()
    # NOTE(review): ad-hoc attribute on the Component; kept for callers that
    # may read it, but c.info["size"] carries the same information.
    c.height = height
    c.info["size"] = (float(size[0]), float(size[1]))
    c.info["layer"] = layer_port

    # One rectangle per layer; only the port layer exposes electrical ports.
    for layer in layers:
        if layer == layer_port:
            ref = c << compass(
                size=(width, height), layer=layer, port_type="electrical"
            )
            c.add_ports(ref.ports)
        else:
            ref = c << compass(size=(width, height), layer=layer, port_type="placement")

    vias = vias or []
    for via in vias:
        if via is not None:
            via = gf.get_component(via)

            w, h = via.info["size"]
            g = via.info["enclosure"]  # minimum enclosure margin around the array
            pitch_x, pitch_y = via.info["spacing"]

            # Largest array of vias that fits inside (width, height) while
            # keeping the enclosure margin; at least one via per axis.
            nb_vias_x = (width - w - 2 * g) / pitch_x + 1
            nb_vias_y = (height - h - 2 * g) / pitch_y + 1

            nb_vias_x = int(floor(nb_vias_x)) or 1
            nb_vias_y = int(floor(nb_vias_y)) or 1
            ref = c.add_array(
                via, columns=nb_vias_x, rows=nb_vias_y, spacing=(pitch_x, pitch_y)
            )

            # Center the array within the rectangle.
            cw = (width - (nb_vias_x - 1) * pitch_x - w) / 2
            ch = (height - (nb_vias_y - 1) * pitch_y - h) / 2
            x0 = -a + cw + w / 2
            y0 = -b + ch + h / 2
            ref.move((x0, y0))
    return c
# Pre-configured via stacks for common layer transitions.

# M1 -> M3 metal stack.
via_stack_m1_m3 = gf.partial(
    via_stack,
    layers=(LAYER.M1, LAYER.M2, LAYER.M3),
    vias=(via1, via2),
)

# Slab silicon -> M3 (contact via + metal vias).
via_stack_slab_m3 = gf.partial(
    via_stack,
    layers=(LAYER.SLAB90, LAYER.M1, LAYER.M2, LAYER.M3),
    vias=(viac, via1, via2),
)

# N++ doped waveguide -> M1 contact.
via_stack_npp_m1 = gf.partial(
    via_stack,
    layers=(LAYER.WG, LAYER.NPP, LAYER.M1),
    vias=(None, None, viac),
)

# NOTE(review): name says m3 but the stack stops at M1 -- confirm whether
# M2/M3 layers (and via1/via2) were intentionally omitted here.
via_stack_slab_npp_m3 = gf.partial(
    via_stack,
    layers=(LAYER.SLAB90, LAYER.NPP, LAYER.M1),
    vias=(None, None, viac),
)

# Heater layer -> M3.
via_stack_heater_m3 = gf.partial(
    via_stack, layers=(LAYER.HEATER, LAYER.M2, LAYER.M3), vias=(via1, via2)
)


if __name__ == "__main__":
    # Demo: build and inspect an M1->M3 via stack.
    c = via_stack_m1_m3()
    print(c.to_dict())
    c.show(show_ports=True)
| mit | ee6962aa40f9156031c3f792670d4aa1 | 27.213675 | 88 | 0.578007 | 2.957885 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/modes/find_neff_ng_dw_dh.py | 1 | 4412 | """Compute group and effective index for different waveguide widths and heights.
Reproduce Yufei thesis results with MPB.
https://www.photonics.intec.ugent.be/contact/people.asp?ID=332
"""
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydantic
from scipy.interpolate import interp2d
from gdsfactory.config import PATH
from gdsfactory.simulation.modes.find_mode_dispersion import find_mode_dispersion
# NOTE(review): import-time side effect -- redirects the global gdsfactory
# mode-cache directory to ./data relative to the current working directory.
PATH.modes = pathlib.Path.cwd() / "data"

nm = 1e-3  # nanometers expressed in um (the length unit used below)
width0 = 465 * nm  # nominal waveguide width (um)
thickness0 = 215 * nm  # nominal waveguide thickness (um)
@pydantic.validate_arguments
def find_neff_ng_dw_dh(
    width: float = width0,
    thickness: float = thickness0,
    delta_width: float = 30 * nm,
    delta_thickness: float = 20 * nm,
    wavelength: float = 1.55,
    steps: int = 11,
    mode_number: int = 1,
    core: str = "Si",
    clad: str = "SiO2",
    **kwargs
) -> pd.DataFrame:
    """Computes group and effective index for different widths and heights.

    Args:
        width: nominal waveguide width in um.
        thickness: nominal waveguide thickness in um.
        delta_width: delta width max in um.
        delta_thickness: delta thickness max in um.
        wavelength: center wavelength (um).
        steps: number of steps to sweep in width and thickness.
        mode_number: mode index to compute (1: fundamental mode).
        core: core material name.
        clad: clad material name.

    Keyword Args:
        wg_thickness: wg height (um).
        sx: supercell width (um).
        sy: supercell height (um).
        resolution: (pixels/um).
        wavelength: wavelength in um.
        num_bands: mode order.
        plot: if True plots mode.
        logscale: plots in logscale.
        plotH: plot magnetic field.
        cache: path to save the modes.
        polarization: prefix when saving the modes.
        parity: symmetries mp.ODD_Y mp.EVEN_X for TE, mp.EVEN_Y for TM.

    Returns:
        DataFrame with one row per (dw, dh) sample and columns
        dw, dh, neff, ng.
    """
    width_offsets = np.linspace(-delta_width, delta_width, steps)
    thickness_offsets = np.linspace(-delta_thickness, delta_thickness, steps)

    # One MPB solve per (dw, dh) grid point.
    records = []
    for dwi in width_offsets:
        for dhi in thickness_offsets:
            mode = find_mode_dispersion(
                core=core,
                clad=clad,
                wg_width=width + dwi,
                wg_thickness=thickness + dhi,
                wavelength=wavelength,
                mode_number=mode_number,
                **kwargs
            )
            records.append(dict(dw=dwi, dh=dhi, neff=mode.neff, ng=mode.ng))

    return pd.DataFrame(records)
def plot_neff_ng_dw_dh(
    width: float = width0,
    thickness: float = thickness0,
    wavelength: float = 1.55,
    mode_number: int = 1,
    **kwargs
) -> None:
    """Plot neff and group index versus width (dw) and height (dh) variations.

    Results are cached in a CSV file under PATH.modes; delete the file to
    force a recomputation.

    Args:
        width: waveguide width in um.
        thickness: waveguide thickness in um.
        wavelength: in um.
        mode_number: 1 is the fundamental first order mode.
    """
    filepath = pathlib.Path(PATH.modes / "mpb_dw_dh_dispersion.csv")

    # Reference (neff, ng) at the nominal geometry.
    m = find_mode_dispersion(
        wg_width=width, wg_thickness=thickness, wavelength=wavelength
    )
    neff0 = m.neff
    ng0 = m.ng

    if filepath.exists():
        df = pd.read_csv(filepath)
    else:
        df = find_neff_ng_dw_dh(wavelength=wavelength, **kwargs)
        cache = filepath.parent
        cache.mkdir(exist_ok=True, parents=True)
        df.to_csv(filepath)

    dws = df.dw.values
    dhs = df.dh.values
    ngs = df.ng.values
    neffs = df.neff.values

    # neff interpolation
    # Inverse maps: (neff, ng) -> geometry offsets dw / dh.
    # NOTE(review): scipy.interpolate.interp2d is deprecated in SciPy >= 1.10
    # and expects gridded data -- confirm this works with the installed SciPy.
    f_w = interp2d(neffs, ngs, np.array(dws), kind="cubic")
    f_h = interp2d(neffs, ngs, np.array(dhs), kind="cubic")

    ws = width + np.array(dws)
    hs = thickness + np.array(dhs)
    plt.plot(ws * 1e3, hs * 1e3, "ko")

    # Round-trip check: recover geometry from each (neff, ng) pair (in nm).
    extracted_dw = []
    extracted_dh = []
    for neff, ng in zip(neffs, ngs):
        temp_w = f_w(neff, ng) + width
        temp_h = f_h(neff, ng) + thickness
        extracted_dw.append(temp_w * 1e3)
        extracted_dh.append(temp_h * 1e3)

    plt.plot(extracted_dw, extracted_dh, "rx")
    plt.xlabel("width (nm)")
    plt.ylabel("height (nm)")

    plt.figure()
    plt.plot(neffs, ngs, "ro")
    plt.plot(neff0, ng0, "bx")  # nominal geometry reference point
    plt.xlabel("neff")
    plt.ylabel("ng")
    plt.show()
if __name__ == "__main__":
    # Demo: run the full sweep (or load the cached CSV) and plot it.
    plot_neff_ng_dw_dh()
| mit | 9df5618a56c9f10d6071a43be9319a0d | 26.403727 | 81 | 0.602901 | 3.201742 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/routing/get_bundle.py | 1 | 24019 | """Routes bundles of ports (river routing).
get bundle is the generic river routing function
get_bundle calls different function depending on the port orientation.
- get_bundle_same_axis: ports facing each other with arbitrary pitch on each side
- get_bundle_corner: 90Deg / 270Deg between ports with arbitrary pitch
- get_bundle_udirect: ports with direct U-turns
- get_bundle_uindirect: ports with indirect U-turns
"""
from functools import partial
from typing import Callable, List, Optional, Union
import numpy as np
from numpy import ndarray
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.via_corner import via_corner
from gdsfactory.components.wire import wire_corner
from gdsfactory.cross_section import strip
from gdsfactory.port import Port
from gdsfactory.routing.get_bundle_corner import get_bundle_corner
from gdsfactory.routing.get_bundle_from_steps import get_bundle_from_steps
from gdsfactory.routing.get_bundle_from_waypoints import get_bundle_from_waypoints
from gdsfactory.routing.get_bundle_sbend import get_bundle_sbend
from gdsfactory.routing.get_bundle_u import get_bundle_udirect, get_bundle_uindirect
from gdsfactory.routing.get_route import get_route, get_route_from_waypoints
from gdsfactory.routing.manhattan import generate_manhattan_waypoints
from gdsfactory.routing.path_length_matching import path_length_matched_points
from gdsfactory.routing.sort_ports import get_port_x, get_port_y
from gdsfactory.routing.sort_ports import sort_ports as sort_ports_function
from gdsfactory.types import (
ComponentSpec,
CrossSectionSpec,
MultiCrossSectionAngleSpec,
Route,
)
def get_bundle(
    ports1: List[Port],
    ports2: List[Port],
    separation: float = 5.0,
    extension_length: float = 0.0,
    straight: ComponentSpec = straight_function,
    bend: ComponentSpec = bend_euler,
    with_sbend: bool = False,
    sort_ports: bool = True,
    cross_section: Union[CrossSectionSpec, MultiCrossSectionAngleSpec] = "strip",
    **kwargs,
) -> List[Route]:
    """Returns list of routes to connect two groups of ports.

    Routes connect a bundle of ports with a river router.
    Chooses the correct routing function depending on port angles.

    Args:
        ports1: list of starting ports.
        ports2: list of end ports.
        separation: bundle separation (center to center).
        extension_length: adds straight extension.
        bend: function for the bend. Defaults to euler.
        with_sbend: use s_bend routing when there is no space for manhattan routing.
        sort_ports: sort port coordinates.
        cross_section: CrossSection or function that returns a cross_section.

    Keyword Args:
        width: main layer waveguide width (um).
        layer: main layer for waveguide.
        width_wide: wide waveguides width (um) for low loss routing.
        auto_widen: taper to wide waveguides for low loss routing.
        auto_widen_minimum_length: minimum straight length for auto_widen.
        taper_length: taper_length for auto_widen.
        bbox_layers: list of layers for rectangular bounding box.
        bbox_offsets: list of bounding box offsets.
        cladding_layers: list of layers to extrude.
        cladding_offsets: list of offset from main Section edge.
        radius: bend radius (um).
        sections: list of Sections(width, offset, layer, ports).
        port_names: for input and output ('o1', 'o2').
        port_types: for input and output: electrical, optical, vertical_te ...
        min_length: defaults to 1nm = 10e-3um for routing.
        start_straight_length: straight length at the beginning of the route.
        end_straight_length: end length at the beginning of the route.
        snap_to_grid: can snap points to grid when extruding the path.
        steps: specify waypoint steps to route using get_bundle_from_steps.
        waypoints: specify waypoints to route using get_bundle_from_steps.
        path_length_match_loops: Integer number of loops to add to bundle
            for path length matching (won't try to match if None).
        path_length_match_extra_length: Extra length to add
            to path length matching loops (requires path_length_match_loops != None).
        path_length_match_modify_segment_i: Index of straight segment to add path
            length matching loops to (requires path_length_match_loops != None).

    .. plot::
        :include-source:

        import gdsfactory as gf

        @gf.cell
        def test_north_to_south():
            dy = 200.0
            xs1 = [-500, -300, -100, -90, -80, -55, -35, 200, 210, 240, 500, 650]

            pitch = 10.0
            N = len(xs1)
            xs2 = [-20 + i * pitch for i in range(N // 2)]
            xs2 += [400 + i * pitch for i in range(N // 2)]

            a1 = 90
            a2 = a1 + 180

            ports1 = [gf.Port(f"top_{i}", center=(xs1[i], +0), width=0.5, orientation=a1, layer=(1,0)) for i in range(N)]
            ports2 = [gf.Port(f"bot_{i}", center=(xs2[i], dy), width=0.5, orientation=a2, layer=(1,0)) for i in range(N)]

            c = gf.Component()
            routes = gf.routing.get_bundle(ports1, ports2)
            for route in routes:
                c.add(route.references)

            return c

        gf.config.set_plot_options(show_subports=False)
        c = test_north_to_south()
        c.plot()

    """
    # convert single port to list
    if isinstance(ports1, Port):
        ports1 = [ports1]

    if isinstance(ports2, Port):
        ports2 = [ports2]

    # convert ports dict to list
    if isinstance(ports1, dict):
        ports1 = list(ports1.values())

    if isinstance(ports2, dict):
        ports2 = list(ports2.values())

    # Normalize orientations to integer degrees in [0, 360); None stays None.
    for p in ports1:
        p.orientation = (
            int(p.orientation) % 360 if p.orientation is not None else p.orientation
        )

    for p in ports2:
        p.orientation = (
            int(p.orientation) % 360 if p.orientation is not None else p.orientation
        )

    if len(ports1) != len(ports2):
        raise ValueError(f"ports1={len(ports1)} and ports2={len(ports2)} must be equal")

    if sort_ports:
        ports1, ports2 = sort_ports_function(ports1, ports2)

    # All start ports must face the same direction for river routing.
    start_port_angles = {p.orientation for p in ports1}
    if len(start_port_angles) > 1:
        raise ValueError(f"All start port angles {start_port_angles} must be equal")

    params = {
        "ports1": ports1,
        "ports2": ports2,
        "separation": separation,
        "bend": bend,
        "straight": straight,
        "cross_section": cross_section,
    }
    params.update(**kwargs)

    # Dispatch on the relative geometry of the two port groups.
    start_angle = ports1[0].orientation
    end_angle = ports2[0].orientation

    start_axis = "X" if start_angle in [0, 180] else "Y"
    end_axis = "X" if end_angle in [0, 180] else "Y"

    x_start = np.mean([p.x for p in ports1])
    x_end = np.mean([p.x for p in ports2])

    y_start = np.mean([p.y for p in ports1])
    y_end = np.mean([p.y for p in ports2])

    # Explicit steps/waypoints override automatic strategy selection.
    if "steps" in kwargs:
        return get_bundle_from_steps(**params)

    elif "waypoints" in kwargs:
        return get_bundle_from_waypoints(**params)
    if start_axis != end_axis:
        # Ports face orthogonal directions (90/270 deg between them).
        return get_bundle_corner(**params)
    if (
        # Ports face each other along the routing axis.
        start_angle == 0
        and end_angle == 180
        and x_start < x_end
        or start_angle == 180
        and end_angle == 0
        and x_start > x_end
        or start_angle == 90
        and end_angle == 270
        and y_start < y_end
        or start_angle == 270
        and end_angle == 90
        and y_start > y_end
    ):
        # print("get_bundle_same_axis")
        if with_sbend:
            return get_bundle_sbend(ports1, ports2, sort_ports=sort_ports, **kwargs)
        return get_bundle_same_axis(**params)
    elif start_angle == end_angle:
        # Both groups face the same direction: direct U-turn.
        # print('get_bundle_udirect')
        return get_bundle_udirect(**params)
    elif end_angle == (start_angle + 180) % 360:
        # Facing away from each other: indirect U-turn.
        # print("get_bundle_uindirect")
        return get_bundle_uindirect(extension_length=extension_length, **params)
    else:
        raise NotImplementedError("This should never happen")
def get_port_width(port: Port) -> Union[float, int]:
    """Return the width of a port."""
    return port.width
def are_decoupled(
    x1: float,
    x1p: float,
    x2: float,
    x2p: float,
    sep: Union[str, float] = "metal_spacing",
) -> bool:
    """Return True when routes (x1, x1p) and (x2, x2p) are at least ``sep`` apart.

    ``sep`` may be a constant name resolved through ``gf.get_constant``.
    """
    sep = gf.get_constant(sep)
    # Guard clauses: either endpoint pair closer than sep means coupled.
    if x2p + sep > x1:
        return False
    if x2 < x1p + sep:
        return False
    return x2 >= x1p - sep
def get_bundle_same_axis(
    ports1: List[Port],
    ports2: List[Port],
    separation: float = 5.0,
    end_straight_length: float = 0.0,
    start_straight_length: float = 0.0,
    bend: ComponentSpec = bend_euler,
    sort_ports: bool = True,
    path_length_match_loops: Optional[int] = None,
    path_length_match_extra_length: float = 0.0,
    path_length_match_modify_segment_i: int = -2,
    cross_section: Union[CrossSectionSpec, MultiCrossSectionAngleSpec] = strip,
    **kwargs,
) -> List[Route]:
    r"""Semi auto-routing for two lists of ports.

    Args:
        ports1: first list of ports.
        ports2: second list of ports.
        separation: minimum separation between two straights.
        end_straight_length: offset to add at the end of each straight.
        start_straight_length: in um.
        bend: spec.
        sort_ports: sort the ports according to the axis.
        path_length_match_loops: Integer number of loops to add to bundle
            for path length matching (won't try to match if None).
        path_length_match_extra_length: Extra length to add
            to path length matching loops (requires path_length_match_loops != None).
        path_length_match_modify_segment_i: Index of straight segment to add path
            length matching loops to (requires path_length_match_loops != None).
        cross_section: CrossSection or function that returns a cross_section.
        kwargs: cross_section settings.

    Returns:
        `[route_filter(r) for r in routes]` list of lists of coordinates
        e.g with default `get_route_from_waypoints`,
        returns a list of elements which can be added to a component

    The routing assumes manhattan routing between the different ports.
    The strategy is to modify `start_straight` and `end_straight` for each
    straight such that straights do not collide.

    .. code::

        1             X    X     X  X X  X
        |-----------|    |     |  | |  |-----------------------|
        |          |-----|     |  | |---------------|          |
        |          |          ||  |------|          |          |
        2 X          X          X   X      X          X

    ports1: at the top
    ports2: at the bottom

    The general strategy is:
    Group tracks which would collide together and apply the following method
    on each group:

        if x2 >= x1, increase ``end_straight``
            (as seen on the right 3 ports)
        otherwise, decrease ``end_straight``
            (as seen on the first 2 ports)

    We deal with negative end_straight by doing at the end
        end_straights = end_straights - min(end_straights)

    This method deals with different metal track/wg/wire widths too.

    """
    # A caller-supplied `straight` is dropped: the waypoint generator below
    # does not accept it.
    if "straight" in kwargs:
        _ = kwargs.pop("straight")

    assert len(ports1) == len(
        ports2
    ), f"ports1={len(ports1)} and ports2={len(ports2)} must be equal"
    if sort_ports:
        ports1, ports2 = sort_ports_function(ports1, ports2)

    # Compute collision-free manhattan waypoints for every route in the bundle.
    routes = _get_bundle_waypoints(
        ports1,
        ports2,
        separation=separation,
        bend=bend,
        cross_section=cross_section,
        end_straight_length=end_straight_length,
        start_straight_length=start_straight_length,
        **kwargs,
    )
    if path_length_match_loops:
        # Optionally insert loops so every route has the same length.
        routes = [np.array(route) for route in routes]
        routes = path_length_matched_points(
            routes,
            extra_length=path_length_match_extra_length,
            bend=bend,
            nb_loops=path_length_match_loops,
            modify_segment_i=path_length_match_modify_segment_i,
            cross_section=cross_section,
            **kwargs,
        )
    # Extrude the waypoint lists into actual Route objects.
    return [
        get_route_from_waypoints(
            route,
            bend=bend,
            cross_section=cross_section,
            **kwargs,
        )
        for route in routes
    ]
def _get_bundle_waypoints(
    ports1: List[Port],
    ports2: List[Port],
    separation: float = 30,
    end_straight_length: float = 0.0,
    tol: float = 0.00001,
    start_straight_length: float = 0.0,
    cross_section: CrossSectionSpec = "strip",
    **kwargs,
) -> List[ndarray]:
    """Returns route coordinates List.

    Args:
        ports1: list of starting ports.
        ports2: list of end ports.
        separation: route spacing.
        end_straight_length: adds a straight.
        tol: tolerance.
        start_straight_length: length of straight.
        cross_section: CrossSection or function that returns a cross_section.
        kwargs: cross_section settings.
    """
    if not ports1 and not ports2:
        return []

    assert len(ports1) == len(
        ports2
    ), f"ports1={len(ports1)} and ports2={len(ports2)} must be equal"

    if not ports1 or not ports2:
        print(f"WARNING! ports1={ports1} or ports2={ports2} are empty")
        return []

    axis = "X" if ports1[0].orientation in [0, 180] else "Y"
    # Single route: no collision avoidance needed.
    if len(ports1) == 1 and len(ports2) == 1:
        return [
            generate_manhattan_waypoints(
                ports1[0],
                ports2[0],
                start_straight_length=start_straight_length,
                end_straight_length=end_straight_length,
                cross_section=cross_section,
                **kwargs,
            )
        ]

    # Contains end_straight of tracks which need to be adjusted together
    end_straights_in_group = []

    # Once a group is finished, all the lengths are appended to end_straights
    end_straights = []

    # Keep track of how many ports should be routed together
    # Coordinates along/across the routing axis for the first port pair.
    if axis in {"X", "x"}:
        x1_prev = get_port_y(ports1[0])
        x2_prev = get_port_y(ports2[0])
        y0 = get_port_x(ports2[0])
        y1 = get_port_x(ports1[0])
    else:  # X axis
        x1_prev = get_port_x(ports1[0])
        x2_prev = get_port_x(ports2[0])
        y0 = get_port_y(ports2[0])
        y1 = get_port_y(ports1[0])

    s = sign(y0 - y1)
    curr_end_straight = 0

    # NOTE(review): `or` replaces an explicitly passed 0.0 with 15.0 as well
    # as the default -- confirm a zero end_straight_length is never intended.
    end_straight_length = end_straight_length or 15.0
    Le = end_straight_length

    # First pass - loop on all the ports to find the tentative end_straights
    for i in range(len(ports1)):
        if axis in {"X", "x"}:
            x1 = get_port_y(ports1[i])
            x2 = get_port_y(ports2[i])
            y = get_port_x(ports2[i])
        else:
            x1 = get_port_x(ports1[i])
            x2 = get_port_x(ports2[i])
            y = get_port_y(ports2[i])

        if are_decoupled(x2, x2_prev, x1, x1_prev, sep=separation):
            # If this metal track does not impact the previous one, then start a new
            # group.
            L = min(end_straights_in_group)
            end_straights += [max(x - L, 0) + Le for x in end_straights_in_group]

            # Start new group
            end_straights_in_group = []
            curr_end_straight = 0

        elif x2 >= x1:
            curr_end_straight += separation
        else:
            curr_end_straight -= separation

        end_straights_in_group.append(curr_end_straight + (y - y0) * s)
        x1_prev = x1
        x2_prev = x2

    # Append the last group
    L = min(end_straights_in_group)

    end_straights += [max(x - L, 0) + Le for x in end_straights_in_group]

    # Second pass - route the ports pairwise
    N = len(ports1)
    return [
        generate_manhattan_waypoints(
            ports1[i],
            ports2[i],
            start_straight_length=start_straight_length,
            end_straight_length=end_straights[i],
            cross_section=cross_section,
            **kwargs,
        )
        for i in range(N)
    ]
def compute_ports_max_displacement(ports1: List[Port], ports2: List[Port]) -> float:
    """Return the largest cross-axis offset between the two port groups."""
    # Ports facing 0/180 deg route along X, so displacement is measured in Y.
    horizontal = ports1[0].orientation in [0, 180]
    coords1 = [p.y for p in ports1] if horizontal else [p.x for p in ports1]
    coords2 = [p.y for p in ports2] if horizontal else [p.x for p in ports2]
    return max(abs(max(coords1) - min(coords2)), abs(min(coords1) - max(coords2)))
def sign(x: float) -> int:
    """Return 1 for positive *x*, otherwise -1 (zero maps to -1)."""
    return -1 if x <= 0 else 1
def get_min_spacing(
    ports1: List[Port],
    ports2: List[Port],
    sep: float = 5.0,
    radius: float = 5.0,
    sort_ports: bool = True,
) -> float:
    """Returns the minimum amount of spacing in um required to create a \
fanout.

    Args:
        ports1: first list of ports.
        ports2: second list of ports.
        sep: center-to-center separation between adjacent routes in um.
        radius: bend radius in um.
        sort_ports: sort the ports along the routing axis before pairing.
    """
    axis = "X" if ports1[0].orientation in [0, 180] else "Y"
    j = 0
    min_j = 0
    max_j = 0
    if sort_ports:
        if axis in {"X", "x"}:
            ports1.sort(key=get_port_y)
            ports2.sort(key=get_port_y)
        else:
            ports1.sort(key=get_port_x)
            ports2.sort(key=get_port_x)

    for port1, port2 in zip(ports1, ports2):
        if axis in {"X", "x"}:
            # bugfix: was get_port_y(ports1) (the whole list, not the port)
            x1 = get_port_y(port1)
            x2 = get_port_y(port2)
        else:
            x1 = get_port_x(port1)
            x2 = get_port_x(port2)
        # Track how far the routes stack up on either side of the bundle.
        if x2 >= x1:
            j += 1
        else:
            j -= 1
        if j < min_j:
            min_j = j
        if j > max_j:
            max_j = j

    return (max_j - min_j) * sep + 2 * radius + 1.0
def get_bundle_same_axis_no_grouping(
    ports1: List[Port],
    ports2: List[Port],
    sep: float = 5.0,
    route_filter: Callable = get_route,
    start_straight_length: Optional[float] = None,
    end_straight_length: Optional[float] = None,
    sort_ports: bool = True,
    cross_section: CrossSectionSpec = strip,
    **kwargs,
) -> List[Route]:
    r"""Returns a list of route elements.

    Compared to get_bundle_same_axis, this function does not do any grouping.
    It is not as smart for the routing, but it can fall back on arclinarc
    connection if needed. We can also specify longer start_straight and end_straight

    Semi auto routing for optical ports
    The routing assumes manhattan routing between the different ports.
    The strategy is to modify ``start_straight`` and ``end_straight`` for each
    straight such that straights do not collide.

    We want to connect something like this:

    ::

         2             X    X     X  X X  X
           |-----------|    |     |  | |  |-----------------------|
           |          |-----|     |  | |---------------|          |
           |          |          ||  |------|          |          |
         1 X          X          X   X      X          X

    ``start`` is at the bottom
    ``end`` is at the top

    The general strategy is:

    if x2 < x1, decrease ``start straight``, and increase ``end_straight``
        (as seen on left two ports)
    otherwise, decrease ``start_straight``, and increase ``end_straight``
        (as seen on the last 3 right ports)

    Args:
        ports1: first list of optical ports.
        ports2: second list of optical ports.
        axis: specifies "X" or "Y" direction along which the port is going.
        route_filter: ManhattanExpandedWgConnector or ManhattanWgConnector.
            or any other connector function with the same input.
        radius: bend radius. If unspecified, uses the default radius.
        start_straight_length: offset on the starting length before the first bend.
        end_straight_length: offset on the ending length after the last bend.
        sort_ports: True -> sort the ports according to the axis.
            False -> no sort applied.
        cross_section: CrossSection or function that returns a cross_section.

    Returns:
        a list of routes the connecting straights.
    """
    # NOTE(review): `axis` and `radius` in the docstring are not parameters;
    # axis is derived from the port orientation below and radius comes from
    # the cross_section / kwargs -- confirm and update the docstring upstream.
    axis = "X" if ports1[0].orientation in [0, 180] else "Y"
    elems = []
    j = 0

    # min and max offsets needed for avoiding collisions between straights
    min_j = 0
    max_j = 0

    if sort_ports:
        # sort by position along the cross axis
        if axis in {"X", "x"}:
            ports1.sort(key=get_port_y)
            ports2.sort(key=get_port_y)
        else:
            ports1.sort(key=get_port_x)
            ports2.sort(key=get_port_x)

    # Compute max_j and min_j
    # First pass: find how far the straights stack up on either side.
    for i in range(len(ports1)):
        if axis in {"X", "x"}:
            x1 = ports1[i].center[1]
            x2 = ports2[i].center[1]
        else:
            x1 = ports1[i].center[0]
            x2 = ports2[i].center[0]
        if x2 >= x1:
            j += 1
        else:
            j -= 1
        if j < min_j:
            min_j = j
        if j > max_j:
            max_j = j
    j = 0

    if start_straight_length is None:
        start_straight_length = 0.2

    if end_straight_length is None:
        end_straight_length = 0.2

    # Budget enough straight length so the most-offset route still fits.
    start_straight_length += max_j * sep
    end_straight_length += -min_j * sep

    # Do case with wire direct if the ys are close to each other
    # Second pass: route each pair, offsetting the straights by j * sep.
    for i, _ in enumerate(ports1):

        if axis in {"X", "x"}:
            x1 = ports1[i].center[1]
            x2 = ports2[i].center[1]
        else:
            x1 = ports1[i].center[0]
            x2 = ports2[i].center[0]

        s_straight = start_straight_length - j * sep
        e_straight = j * sep + end_straight_length

        elems += [
            route_filter(
                ports1[i],
                ports2[i],
                start_straight_length=s_straight,
                end_straight_length=e_straight,
                cross_section=cross_section,
                **kwargs,
            )
        ]

        if x2 >= x1:
            j += 1
        else:
            j -= 1
    return elems
# Electrical variant: sharp wire corners on a single metal routing layer.
get_bundle_electrical = partial(
    get_bundle, bend=wire_corner, cross_section="metal_routing"
)

# Multilayer electrical variant: vertical segments on metal2, horizontal
# segments on the metal routing layer, joined by via corners.
get_bundle_electrical_multilayer = gf.partial(
    get_bundle,
    bend=via_corner,
    cross_section=[
        (gf.cross_section.metal2, (90, 270)),
        ("metal_routing", (0, 180)),
    ],
)
@gf.cell
def test_get_bundle_small() -> Component:
    """Regression test: routes two MMI2x2s and checks the route length."""
    c = gf.Component()
    c1 = c << gf.components.mmi2x2()
    c2 = c << gf.components.mmi2x2()
    c2.move((100, 40))
    routes = get_bundle(
        [c1.ports["o3"], c1.ports["o4"]],
        [c2.ports["o1"], c2.ports["o2"]],
        separation=5.0,
        cross_section=gf.cross_section.strip(radius=5, layer=(2, 0))
        # cross_section=gf.cross_section.strip,
    )
    for route in routes:
        c.add(route.references)
        # Golden value pinned for this geometry; update if routing changes.
        assert np.isclose(route.length, 111.136), route.length
    return c
if __name__ == "__main__":
    # Interactive demo / scratchpad. The commented lines below are alternative
    # examples kept for manual experimentation.
    # c = test_connect_corner(None, check=False)
    # c = test_get_bundle_small()
    # c = test_get_bundle_small()
    # c = test_facing_ports()
    # c = test_get_bundle_u_indirect()
    # c = test_get_bundle_udirect()
    # c = test_connect_corner()
    import gdsfactory as gf

    # c = gf.Component("get_bundle_none_orientation")
    # pt = c << gf.components.pad_array(orientation=None, columns=3)
    # pb = c << gf.components.pad_array(orientation=None, columns=3)
    # pt.move((100, 200))
    # routes = gf.routing.get_bundle_electrical_multilayer(
    #     pb.ports,
    #     pt.ports,
    #     start_straight_length=1,
    #     end_straight_length=10,
    #     separation=30,
    # )
    # for route in routes:
    #     c.add(route.references)

    # Demo: bundle-route two crossing port pairs between two MMIs and
    # display the result in KLayout.
    c = gf.Component("demo")
    c1 = c << gf.components.mmi2x2()
    c2 = c << gf.components.mmi2x2()
    c2.move((100, 40))
    routes = get_bundle(
        [c1.ports["o2"], c1.ports["o1"]],
        [c2.ports["o1"], c2.ports["o2"]],
        radius=5,
        # layer=(2, 0),
        straight=gf.partial(gf.components.straight, layer=(1, 0), width=1),
    )
    for route in routes:
        c.add(route.references)
    c.show(show_ports=True)
| mit | ed3ac2fe9840e979afbfbac399ff4c9f | 31.37062 | 121 | 0.589492 | 3.499272 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/wafer.py | 1 | 1050 | import gdsfactory as gf
from gdsfactory.types import Component, ComponentSpec, Optional, Tuple
# Reticle columns per row for a 200 mm wafer layout (top row to bottom row).
_cols_200mm_wafer = (2, 6, 6, 8, 8, 6, 6, 2)
@gf.cell
def wafer(
    reticle: ComponentSpec = "die",
    cols: Tuple[int, ...] = _cols_200mm_wafer,
    xspacing: Optional[float] = None,
    yspacing: Optional[float] = None,
) -> Component:
    """Returns a complete wafer of reticles. Useful for mask aligner steps.

    Args:
        reticle: spec for each wafer reticle.
        cols: number of reticle columns for each row.
        xspacing: horizontal pitch; defaults to reticle.xsize.
        yspacing: vertical pitch; defaults to reticle.ysize.
    """
    component = gf.Component()
    reticle = gf.get_component(reticle)
    dx = xspacing or reticle.xsize
    dy = yspacing or reticle.ysize

    for row_index, n_columns in enumerate(cols):
        # One centered row of reticles per entry in cols.
        row = component.add_array(
            reticle, rows=1, columns=n_columns, spacing=(dx, dy)
        )
        row.x = 0
        row.movey(row_index * dy)
    return component
if __name__ == "__main__":
    # Quick visual check: build the default wafer and open it in KLayout.
    c = wafer()
    c.show()
| mit | 18b655ee0d969b2369c463df1eb7b7e5 | 25.923077 | 74 | 0.622857 | 3.354633 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/samples/01_component_pcell.py | 1 | 2851 | """Based on phidl tutorial.
We'll start by assuming we have a function straight() which already
exists and makes us a simple straight waveguide. Many functions like
this exist in the gdsfactory.components library and are ready-for-use.
We write this one out fully just so it's explicitly clear what's
happening
"""
import gdsfactory as gf
from gdsfactory.types import LayerSpec
@gf.cell
def straight_wide(
    length: float = 5.0, width: float = 1.0, layer: LayerSpec = (2, 0)
) -> gf.Component:
    """Return a rectangular straight waveguide with a port on each end.

    Args:
        length: waveguide length in um.
        width: waveguide width in um.
        layer: layer spec for the polygon and ports.
    """
    component = gf.Component("straight_sample")
    component.add_polygon(
        [(0, 0), (length, 0), (length, width), (0, width)], layer=layer
    )
    # Ports sit at the vertical center of each end, facing outwards.
    port_y = width / 2
    component.add_port(
        name="o1", center=(0, port_y), width=width, orientation=180, layer=layer
    )
    component.add_port(
        name="o2", center=(length, port_y), width=width, orientation=0, layer=layer
    )
    return component
def test_straight_wide(data_regression):
    """Regression-check the serialized settings of the default straight_wide."""
    data_regression.check(straight_wide().to_dict())
# ==============================================================================
# Create a blank component
# ==============================================================================
# Let's create a new Component ``c`` which will act as a blank canvas (c can be
# thought of as a blank GDS cell with some special features).

if __name__ == "__main__":
    c = gf.Component("MultiWaveguide")

    # Now say we want to add a few straights to our Component c.
    # First we create the straights. As you can see from the straight_wide()
    # function definition, straight_wide() creates another Component ("WG").
    # This can be thought of as the straight_wide() function creating another
    # GDS cell, only this one has some geometry inside it.
    #
    # Let's create two of these Components by calling the straight_wide() function
    WG1 = straight_wide(length=10, width=1)
    WG2 = straight_wide(length=12, width=2)

    # Now we've made two straight Components WG1 and WG2, and we have a blank
    # Component c. We can add references from the devices WG1 and WG2 to our
    # blank Component by using the add_ref() function.
    # After adding WG1, we see that the add_ref() function returns a handle to
    # our reference, which we label with lowercase letters wg1 and wg2. This
    # handle will be useful later when we want to move wg1 and wg2 around in c.
    wg1 = c.add_ref(WG1)  # Using the function add_ref()
    wg2 = c << WG2  # Using the << operator which is identical to add_ref()

    # Alternatively, we can do this all on one line
    wg3 = c.add_ref(straight_wide(length=14, width=3))

    c.show(show_ports=True)  # show it in Klayout
| mit | 5cdc50bc34abb4c075eb6a65ad04e138 | 37.013333 | 87 | 0.642231 | 3.688228 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/tests/test_name_with_decorator.py | 1 | 2112 | import gdsfactory as gf
@gf.cell
def straight_with_pins(**kwargs):
    """Wrap a default straight in a new Component and re-expose its ports.

    kwargs (e.g. ``decorator``) are consumed by the ``gf.cell`` decorator.
    """
    component = gf.Component()
    straight_ref = component << gf.components.straight()
    component.add_ports(straight_ref.ports)
    return component
def test_name_with_decorator():
    """Decorated and undecorated variants of a cell must get distinct names."""
    c = gf.Component("test_name_with_decorator")
    ref_decorated = c << straight_with_pins(decorator=gf.add_padding)
    ref_plain = c << straight_with_pins()
    ref_decorated.movey(-10)
    ref_plain.movey(100)

    cell_names = [cell.name for cell in list(c.get_dependencies())]
    cell_names_unique = set(cell_names)
    if len(cell_names) != len(cell_names_unique):
        # Drop one occurrence of every unique name; whatever remains
        # is the set of duplicated names.
        for name in cell_names_unique:
            cell_names.remove(name)
        cell_names_duplicated = "\n".join(set(cell_names))
        raise ValueError(
            f"Duplicated cell names in {c.name!r}:\n{cell_names_duplicated}"
        )

    all_cells = [c] + list(c.get_dependencies(recursive=True))
    no_name_cells = [cell.name for cell in all_cells if cell.name.startswith("Unnamed")]
    assert not no_name_cells, f"Component {c.name!r} contains {len(no_name_cells)} Unnamed cells"
if __name__ == "__main__":
    # Interactive version of test_name_with_decorator above, with the final
    # assert commented out so the layout can be inspected in KLayout.
    c = gf.Component("test_name_with_decorator")
    c1 = c << straight_with_pins(decorator=gf.add_padding)
    c1.movey(-10)
    c2 = c << straight_with_pins()
    c2.movey(100)
    cells = c.get_dependencies()
    cell_names = [cell.name for cell in list(cells)]
    cell_names_unique = set(cell_names)
    if len(cell_names) != len(set(cell_names)):
        # Remove one occurrence of each unique name; the leftovers are duplicates.
        for cell_name in cell_names_unique:
            cell_names.remove(cell_name)
        cell_names_duplicated = "\n".join(set(cell_names))
        raise ValueError(
            f"Duplicated cell names in {c.name!r}:\n{cell_names_duplicated}"
        )
    referenced_cells = list(c.get_dependencies(recursive=True))
    all_cells = [c] + referenced_cells
    no_name_cells = [cell.name for cell in all_cells if cell.name.startswith("Unnamed")]
    # assert (
    #     not no_name_cells
    # ), f"Component {c.name!r} contains {len(no_name_cells)} Unnamed cells"
    c.show(show_ports=True)
| mit | 172361004f7573f0ef9b2beb385314e4 | 29.608696 | 88 | 0.624527 | 3.190332 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/modes/get_mode_solver_coupler.py | 1 | 6920 | import pathlib
import tempfile
from typing import Optional, Tuple, Union
import meep as mp
import numpy as np
import pydantic
from meep import mpb
# Silence MPB console output.
mpb.Verbosity(0)

# Scratch directory for MPB output files, shared across runs.
tmp = pathlib.Path(tempfile.TemporaryDirectory().name).parent / "meep"
tmp.mkdir(exist_ok=True)

# Type alias: tuple of floats (e.g. waveguide widths or gaps in um).
Floats = Tuple[float, ...]
@pydantic.validate_arguments
def get_mode_solver_coupler(
    wg_width: float = 0.5,
    gap: float = 0.2,
    wg_widths: Optional[Floats] = None,
    gaps: Optional[Floats] = None,
    wg_thickness: float = 0.22,
    slab_thickness: float = 0.0,
    ncore: float = 3.47,
    nclad: float = 1.44,
    nslab: Optional[float] = None,
    ymargin: float = 2.0,
    sz: float = 2.0,
    resolution: int = 32,
    nmodes: int = 4,
    sidewall_angles: Optional[Union[Tuple[float, ...], float]] = None,
) -> mpb.ModeSolver:
    """Returns an MPB mode_solver simulation for a multi-waveguide coupler.

    Args:
        wg_width: wg_width (um) for the symmetric case.
        gap: for the case of only two waveguides.
        wg_widths: list or tuple of waveguide widths.
        gaps: list or tuple of waveguide gaps.
        wg_thickness: wg thickness (um).
        slab_thickness: thickness for the waveguide slab.
        ncore: core material refractive index.
        nclad: clad material refractive index.
        nslab: Optional slab material refractive index. Defaults to ncore.
        ymargin: margin in y.
        sz: simulation region thickness (um).
        resolution: resolution (pixels/um).
        nmodes: number of modes.
        sidewall_angles: waveguide sidewall angle (degrees),
            tapers from wg_width at top of slab, upwards, to top of waveguide.
            A sidewall_angle = 10 makes 80 degrees with respect to the substrate.
            A single float (or single unique value) applies to all waveguides;
            a tuple gives one angle per waveguide.

    Returns:
        Configured mpb.ModeSolver; ``mode_solver.info`` holds the settings used.

    ::

          __________________________________________________

          |
          |
          |         widths[0]                      widths[1]
          |         <----------->     gaps[0]    <---------->
          |          ___________ <-------------> ___________      _
          |         |           |               |           |     |
        sz|_________|  ncore    |_______________|           |_____|
          |                                                       | wg_thickness
          |slab_thickness                nslab                    |
          |___________________________________________________    |
          |
          |<--->                                         <--->
          |ymargin               nclad                   ymargin
          |____________________________________________________
          <--------------------------------------------------->
                                   sy
    """
    # Symmetric two-waveguide defaults when explicit lists are not given.
    wg_widths = wg_widths or (wg_width, wg_width)
    gaps = gaps or (gap,)
    material_core = mp.Medium(index=ncore)
    material_clad = mp.Medium(index=nclad)
    material_slab = mp.Medium(index=nslab or ncore)

    # Define the computational cell. We'll make x the propagation direction.
    # The other cell sizes should be big enough so that the boundaries are
    # far away from the mode field.
    sy = np.sum(wg_widths) + np.sum(gaps) + 2 * ymargin
    geometry_lattice = mp.Lattice(size=mp.Vector3(0, sy, sz))
    geometry = []
    # Walk left-to-right placing one core per width; y tracks the left edge
    # of the next waveguide. A trailing 0 gap keeps gaps[i] indexable on the
    # last iteration.
    y = -sy / 2 + ymargin
    gaps = list(gaps) + [0]
    for i, wg_width in enumerate(wg_widths):
        if sidewall_angles:
            # Slanted sidewalls: extrude a prism from the slab top upwards.
            geometry.append(
                mp.Prism(
                    vertices=[
                        mp.Vector3(y=y, z=slab_thickness),
                        mp.Vector3(y=y + wg_width, z=slab_thickness),
                        mp.Vector3(x=1, y=y + wg_width, z=slab_thickness),
                        mp.Vector3(x=1, y=y, z=slab_thickness),
                    ],
                    height=wg_thickness - slab_thickness,
                    center=mp.Vector3(
                        y=y + wg_width / 2,
                        z=slab_thickness + (wg_thickness - slab_thickness) / 2,
                    ),
                    # If only 1 angle is specified, use it for all waveguides
                    sidewall_angle=np.deg2rad(sidewall_angles)
                    if len(np.unique(sidewall_angles)) == 1
                    else np.deg2rad(sidewall_angles[i]),
                    material=material_core,
                )
            )
        else:
            # Vertical sidewalls: a simple rectangular block.
            geometry.append(
                mp.Block(
                    size=mp.Vector3(mp.inf, wg_width, wg_thickness),
                    material=material_core,
                    center=mp.Vector3(y=y + wg_width / 2, z=wg_thickness / 2),
                )
            )

        y += gaps[i] + wg_width

    # define the 2D blocks for the strip and substrate
    geometry += [
        mp.Block(
            size=mp.Vector3(mp.inf, mp.inf, slab_thickness),
            material=material_slab,
            center=mp.Vector3(z=slab_thickness / 2),
        ),
    ]

    # The k (i.e. beta, i.e. propagation constant) points to look at, in
    # units of 2*pi/um. We'll look at num_k points from k_min to k_max.
    num_k = 9
    k_min = 0.1
    k_max = 3.0
    k_points = mp.interpolate(num_k, [mp.Vector3(k_min), mp.Vector3(k_max)])

    # Increase this to see more modes. (The guided ones are the ones below the
    # light line, i.e. those with frequencies < kmag / 1.45, where kmag
    # is the corresponding column in the output if you grep for "freqs:".)

    # Use this prefix for output files, encoding the geometry parameters.
    wg_widths_str = "_".join([str(i) for i in wg_widths])
    gaps_str = "_".join([str(i) for i in gaps])
    filename_prefix = (
        tmp / f"coupler_{wg_widths_str}_{gaps_str}_{wg_thickness}_{slab_thickness}"
    )

    mode_solver = mpb.ModeSolver(
        geometry_lattice=geometry_lattice,
        geometry=geometry,
        k_points=k_points,
        resolution=resolution,
        num_bands=nmodes,
        filename_prefix=str(filename_prefix),
        default_material=material_clad,
    )
    mode_solver.nmodes = nmodes
    # Record the settings used so downstream consumers (plots, caches) can
    # reconstruct the simulation context.
    mode_solver.info = dict(
        wg_widths=wg_widths,
        gaps=gaps,
        wg_thickness=wg_thickness,
        slab_thickness=slab_thickness,
        ncore=ncore,
        nclad=nclad,
        sy=sy,
        sz=sz,
        resolution=resolution,
        nmodes=nmodes,
    )
    return mode_solver
if __name__ == "__main__":
    # Demo: build a two-waveguide coupler with different sidewall angles and
    # plot its permittivity cross-section.
    import matplotlib.pyplot as plt

    m = get_mode_solver_coupler(
        slab_thickness=90e-3,
        nslab=2,
        gap=0.5,
        wg_width=1,
        resolution=64,
        sidewall_angles=(10.0, 20.0),
    )
    # NOTE(review): init_params is the legacy MPB initialization API — confirm
    # it is still available in the pinned meep/mpb version.
    m.init_params(p=mp.NO_PARITY, reset_fields=False)
    eps = m.get_epsilon()
    # cmap = 'viridis'
    # cmap = "RdBu"
    cmap = "binary"
    origin = "lower"
    # Plot sqrt(eps) (~refractive index) over the y-z cross-section.
    plt.imshow(
        eps.T**0.5,
        cmap=cmap,
        origin=origin,
        aspect="auto",
        extent=[
            -m.info["sy"] / 2,
            m.info["sy"] / 2,
            -m.info["sz"] / 2,
            m.info["sz"] / 2,
        ],
    )
    plt.colorbar()
    plt.show()
| mit | 46044249853bf8e0e4c1150b234504e2 | 32.110048 | 86 | 0.498266 | 3.609807 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/gtidy3d/write_sparameters.py | 1 | 10294 | import time
import numpy as np
import tidy3d as td
from omegaconf import OmegaConf
import gdsfactory as gf
from gdsfactory.config import logger
from gdsfactory.serialization import clean_value_json
from gdsfactory.simulation import port_symmetries
from gdsfactory.simulation.get_sparameters_path import (
get_sparameters_path_tidy3d as get_sparameters_path,
)
from gdsfactory.simulation.gtidy3d.get_results import _executor, get_results
from gdsfactory.simulation.gtidy3d.get_simulation import get_simulation, plot_simulation
from gdsfactory.types import (
Any,
ComponentSpec,
Dict,
List,
Optional,
PathType,
Port,
PortSymmetries,
Tuple,
)
def parse_port_eigenmode_coeff(
    port_name: str, ports: Dict[str, Port], sim_data: td.SimulationData
) -> Tuple[np.ndarray, np.ndarray]:
    """Given a port and eigenmode coefficient result, returns the coefficients \
    relative to whether the wavevector is entering or exiting simulation.

    Args:
        port_name: port name.
        ports: component_ref.ports.
        sim_data: simulation data.

    Returns:
        Tuple of flattened (entering, exiting) eigenmode coefficient arrays.

    Raises:
        ValueError: if the port orientation is not 0, 90, 180 or 270 degrees.
    """
    # Ports point away from the simulation region (they sit near the PMLs),
    # so the orientation tells us which monitor direction corresponds to
    # modes entering vs exiting the simulation.
    orientation = ports[port_name].orientation

    if orientation in [0, 90]:  # east- or north-facing port
        direction_inp = "-"
        direction_out = "+"
    elif orientation in [180, 270]:  # west- or south-facing port
        direction_inp = "+"
        direction_out = "-"
    else:
        # f-string so the offending orientation value shows up in the message
        raise ValueError(
            f"Port orientation = {orientation} is not 0, 90, 180, or 270 degrees"
        )

    coeff_inp = sim_data.monitor_data[port_name].amps.sel(direction=direction_inp)
    coeff_out = sim_data.monitor_data[port_name].amps.sel(direction=direction_out)
    return coeff_inp.values.flatten(), coeff_out.values.flatten()
def get_wavelengths(port_name: str, sim_data: td.SimulationData) -> np.ndarray:
    """Return the monitored wavelengths (um) for a port, converted from its frequencies."""
    frequencies = sim_data.monitor_data[port_name].amps.sel(direction="+").f
    return td.constants.C_0 / frequencies.values
def write_sparameters(
    component: ComponentSpec,
    port_symmetries: Optional[PortSymmetries] = None,
    port_source_names: Optional[List[str]] = None,
    dirpath: Optional[PathType] = None,
    run: bool = True,
    overwrite: bool = False,
    **kwargs,
) -> Dict[str, np.ndarray]:
    """Get full sparameter matrix from a gdsfactory Component.

    Simulates each time using a different input port (by default, all of them)
    unless you specify port_symmetries.

    port_symmetries = {"o1":
            {
                "s11": ["s22","s33","s44"],
                "s21": ["s21","s34","s43"],
                "s31": ["s13","s24","s42"],
                "s41": ["s14","s23","s32"],
            }
        }

    - Only simulations using the outer key port names will be run
    - The associated value is another dict whose keys are the S-parameters computed
        when this source is active
    - The values of this inner Dict are lists of s-parameters whose values are copied

    Args:
        component: to simulate.
        port_symmetries: Dict to specify port symmetries, to save number of simulations.
        port_source_names: list of ports to excite. Defaults to all.
        dirpath: directory to store sparameters in npz.
            Defaults to active Pdk.sparameters_path.
        run: runs simulation, if False, only plots simulation.
        overwrite: overwrites stored Sparameter npz results.

    Returns:
        Dict mapping "portA@0,portB@0" keys to complex S-parameter arrays,
        plus a "wavelengths" entry (or the cached npz mapping when loaded
        from disk).

    Keyword Args:
        port_extension: extend ports beyond the PML.
        layer_stack: contains layer to thickness, zmin and material.
            Defaults to active pdk.layer_stack.
        thickness_pml: PML thickness (um).
        xmargin: left/right distance from component to PML.
        xmargin_left: left distance from component to PML.
        xmargin_right: right distance from component to PML.
        ymargin: left/right distance from component to PML.
        ymargin_top: top distance from component to PML.
        ymargin_bot: bottom distance from component to PML.
        zmargin: thickness for cladding above and below core.
        clad_material: material for cladding.
        port_margin: margin on each side of the port.
        distance_source_to_monitors: in (um) source goes before monitors.
        wavelength_start: in (um).
        wavelength_stop: in (um).
        wavelength_points: in (um).
        plot_modes: plot source modes.
        num_modes: number of modes to plot.
        run_time_ps: make sure it's sufficient for the fields to decay.
            defaults to 10ps and counts on automatic shutoff to stop earlier if needed.
        dispersive: False uses constant refractive index materials.
            True adds wavelength depending materials.
            Dispersive materials require more computation.
        material_name_to_tidy3d_index: not dispersive materials have a constant index.
        material_name_to_tidy3d_name: dispersive materials have a wavelength
            dependent index. Maps layer_stack names with tidy3d material database names.
        is_3d: if False, does not consider Z dimension for faster simulations.
        with_all_monitors: True adds field monitor which increases results file size.
        grid_spec: defaults to automatic td.GridSpec.auto(wavelength=wavelength)
            td.GridSpec.uniform(dl=20*nm)
            td.GridSpec(
                grid_x = td.UniformGrid(dl=0.04),
                grid_y = td.AutoGrid(min_steps_per_wvl=20),
                grid_z = td.AutoGrid(min_steps_per_wvl=20),
                wavelength=wavelength,
                override_structures=[refine_box]
            )
        dilation: float = 0.0
            Dilation of the polygon in the base by shifting each edge along its
            normal outwards direction by a distance;
            a negative value corresponds to erosion.
        sidewall_angle_deg : float = 0
            Angle of the sidewall.
            ``sidewall_angle=0`` (default) specifies vertical wall,
            while ``0<sidewall_angle_deg<90`` for the base to be larger than the top.
    """
    component = gf.get_component(component)

    # kwargs participate in the cache key: same settings -> same filepath.
    filepath = get_sparameters_path(
        component=component,
        dirpath=dirpath,
        **kwargs,
    )
    filepath_sim_settings = filepath.with_suffix(".yml")

    # Return cached results unless the caller forces a rerun.
    if filepath.exists() and not overwrite and run:
        logger.info(f"Simulation loaded from {filepath!r}")
        return np.load(filepath)

    port_symmetries = port_symmetries or {}
    component_ref = component.ref()
    ports = component_ref.ports
    port_names = [port.name for port in list(ports.values())]

    sims = []
    sp = {}
    # One simulation per source port, skipping ports covered by symmetries.
    port_source_names = port_source_names or port_names
    for port_name in port_source_names:
        if port_name not in port_symmetries:
            sim = get_simulation(component, port_source_name=port_name, **kwargs)
            sims.append(sim)

    if not run:
        sim = sims[0]
        plot_simulation(sim)
        return sp

    start = time.time()
    batch_data = get_results(sims, overwrite=overwrite)

    def get_sparameter(
        port_name_source: str,
        sim_data: td.SimulationData,
        port_symmetries=port_symmetries,
        **kwargs,
    ) -> Dict[str, np.ndarray]:
        """Update and return ``sp`` with the S-parameters for one source port.

        Args:
            port_name_source: source port name.
            sim_data: simulation data.
            port_symmetries: to save simulations.
            kwargs: simulation settings.
        """
        source_entering, source_exiting = parse_port_eigenmode_coeff(
            port_name=port_name_source, ports=component_ref.ports, sim_data=sim_data
        )
        for port_name in port_names:
            monitor_entering, monitor_exiting = parse_port_eigenmode_coeff(
                port_name=port_name, ports=ports, sim_data=sim_data
            )
            # S_ij = (mode exiting at monitor j) / (mode injected at source i)
            sij = monitor_exiting / source_entering
            key = f"{port_name}@0,{port_name_source}@0"
            sp[key] = sij
            sp["wavelengths"] = get_wavelengths(port_name=port_name, sim_data=sim_data)

        # Copy computed S-parameters onto their symmetric counterparts.
        if bool(port_symmetries):
            for key, symmetries in port_symmetries.items():
                for sym in symmetries:
                    if key in sp:
                        sp[sym] = sp[key]

        return sp

    for port_source_name, (_sim_name, sim_data) in zip(
        port_source_names, batch_data.items()
    ):
        sp.update(get_sparameter(port_source_name, sim_data))

    end = time.time()
    np.savez_compressed(filepath, **sp)
    # Record timing alongside the simulation settings in a sidecar YAML file.
    kwargs.update(compute_time_seconds=end - start)
    kwargs.update(compute_time_minutes=(end - start) / 60)
    filepath_sim_settings.write_text(OmegaConf.to_yaml(clean_value_json(kwargs)))
    logger.info(f"Write simulation results to {str(filepath)!r}")
    logger.info(f"Write simulation settings to {str(filepath_sim_settings)!r}")
    return sp
def write_sparameters_batch(
    jobs: List[Dict[str, Any]], **kwargs
) -> List[Dict[str, np.ndarray]]:
    """Run ``write_sparameters`` for a list of jobs in parallel.

    Args:
        jobs: list of kwargs dicts, one per ``write_sparameters`` call.
        kwargs: simulation settings shared by every job.

    Returns:
        One sparameters dict per job, in the same order as ``jobs``.
    """
    # Submit everything first so the simulations run concurrently,
    # then gather results in submission order.
    futures = [_executor.submit(write_sparameters, **job, **kwargs) for job in jobs]
    return [future.result() for future in futures]
# Convenience variants with pre-filled port symmetries, which reduce the
# number of simulations required for symmetric devices.
write_sparameters_1x1 = gf.partial(
    write_sparameters, port_symmetries=port_symmetries.port_symmetries_1x1
)
write_sparameters_crossing = gf.partial(
    write_sparameters, port_symmetries=port_symmetries.port_symmetries_crossing
)
write_sparameters_batch_1x1 = gf.partial(
    write_sparameters_batch, port_symmetries=port_symmetries.port_symmetries_1x1
)
if __name__ == "__main__":
    # Demo: simulate an MMI 1x2 in 3D and plot its S-parameters.
    import gdsfactory as gf
    import gdsfactory.simulation as sim

    # c = gf.components.straight(length=2.1)
    c = gf.c.straight()
    c = gf.components.mmi1x2()
    sp = write_sparameters(c, is_3d=True, port_source_names=None, overwrite=False)
    sim.plot.plot_sparameters(sp)
    # t = sp.o1@0,o2@0
    # print(f"Transmission = {t}")
    # cs = [gf.c.straight(length=1.11 + i) for i in [1, 2]]
    # sps = write_sparameters_batch_1x1(cs)
| mit | 5e88b9d7ea4cbad0bf94bdbbccca5008 | 36.569343 | 88 | 0.648436 | 3.70421 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/coh_tx_dual_pol.py | 1 | 5107 | from typing import Optional
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.routing.get_route import get_route
from gdsfactory.types import ComponentSpec, CrossSectionSpec
@cell
def coh_tx_dual_pol(
    splitter: ComponentSpec = "mmi1x2",
    combiner: Optional[ComponentSpec] = None,
    spol_coh_tx: ComponentSpec = "coh_tx_single_pol",
    yspacing: float = 10.0,
    xspacing: float = 40.0,
    input_coupler: Optional[ComponentSpec] = None,
    output_coupler: Optional[ComponentSpec] = None,
    cross_section: CrossSectionSpec = "strip",
    **kwargs
) -> Component:
    """Dual polarization coherent transmitter.

    Args:
        splitter: splitter function.
        combiner: Optional combiner function.
        spol_coh_tx: function generating a coherent tx for a single polarization.
        yspacing: vertical spacing between each single polarization coherent tx.
        xspacing: horizontal spacing between splitter and combiner.
        input_coupler: Optional coupler to add before the splitter.
        output_coupler: Optional coupler to add after the combiner.
        cross_section: for routing (splitter to mzms and mzms to combiners).
        kwargs: cross_section settings.

    Returns:
        Component with optical ports (and couplers if requested) plus the
        electrical ports of both single-polarization transmitters, prefixed
        pol1/pol2.

    .. code::

                                 ___ single_pol_tx__
                                 |                  |
                                 |                  |
                                 |                  |
        (in_coupler)---splitter==|                  |==combiner---(out_coupler)
                                 |                  |
                                 |                  |
                                 |___ single_pol_tx_|
    """
    spol_coh_tx = gf.get_component(spol_coh_tx)

    # ----- Draw the two single-polarization coherent transmitters -----
    c = Component()

    single_tx_1 = c << spol_coh_tx
    single_tx_2 = c << spol_coh_tx

    # Separate the two transmitters vertically by yspacing.
    single_tx_2.movey(single_tx_1.ymin - yspacing - single_tx_2.ymax)

    # ------------ Splitters and combiners ---------------
    # Splitter sits to the left, centered between the two input ports.
    splitter = gf.get_component(splitter)
    sp = c << splitter
    sp.x = single_tx_1.xmin - xspacing
    sp.y = (single_tx_1.ports["o1"].y + single_tx_2.ports["o1"].y) / 2

    route = get_route(
        sp.ports["o2"],
        single_tx_1.ports["o1"],
        cross_section=cross_section,
        with_sbend=False,
        **kwargs
    )
    c.add(route.references)

    route = get_route(
        sp.ports["o3"],
        single_tx_2.ports["o1"],
        cross_section=cross_section,
        with_sbend=False,
        **kwargs
    )
    c.add(route.references)

    if combiner:
        # Combiner mirrors the splitter on the right-hand side.
        combiner = gf.get_component(combiner)
        comb = c << combiner
        comb.mirror()

        comb.x = single_tx_1.xmax + xspacing
        comb.y = (single_tx_1.ports["o2"].y + single_tx_2.ports["o2"].y) / 2

        route = get_route(
            comb.ports["o2"],
            single_tx_1.ports["o2"],
            cross_section=cross_section,
            with_sbend=False,
            **kwargs
        )
        c.add(route.references)

        route = get_route(
            comb.ports["o3"],
            single_tx_2.ports["o2"],
            cross_section=cross_section,
            with_sbend=False,
            **kwargs
        )
        c.add(route.references)

    # ------- In and out couplers (if indicated) -----
    if input_coupler:
        # Add input coupler
        in_coupler = gf.get_component(input_coupler)
        in_coup = c << in_coupler
        in_coup.connect("o1", sp.ports["o1"])
    else:
        c.add_port("o1", port=sp.ports["o1"])

    if output_coupler:
        output_coupler = gf.get_component(output_coupler)
        out_coup = c << output_coupler
        if combiner:
            # Add output coupler
            out_coup.connect("o1", comb.ports["o1"])
        else:
            # Directly connect the output coupler to the branches.
            # Assumes the output coupler has ports "o1" and "o2"
            out_coup.y = (single_tx_1.y + single_tx_2.y) / 2
            out_coup.xmin = single_tx_1.xmax + 40.0

            route = get_route(
                single_tx_1.ports["o2"],
                out_coup.ports["o1"],
                cross_section=cross_section,
                with_sbend=False,
                **kwargs
            )
            c.add(route.references)

            route = get_route(
                single_tx_2.ports["o2"],
                out_coup.ports["o2"],
                cross_section=cross_section,
                with_sbend=False,
                **kwargs
            )
            c.add(route.references)
    else:
        c.add_port("o2", port=single_tx_1.ports["o2"])
        c.add_port("o3", port=single_tx_2.ports["o2"])

    # Re-expose the electrical ports of both transmitters, prefixed per pol.
    c.add_ports(single_tx_1.get_ports_list(port_type="electrical"), prefix="pol1")
    c.add_ports(single_tx_2.get_ports_list(port_type="electrical"), prefix="pol2")

    c.auto_rename_ports()
    return c
if __name__ == "__main__":
    # Quick visual check: build the default dual-pol transmitter and show it.
    c = coh_tx_dual_pol()
    c.show(show_ports=True)
| mit | 19313d07c5c727385f1e13487a7516c2 | 30.524691 | 82 | 0.531623 | 3.576331 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/components/C.py | 1 | 1168 | from typing import Tuple
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.types import LayerSpec
@gf.cell
def C(
    width: float = 1.0,
    size: Tuple[float, float] = (10.0, 20.0),
    layer: LayerSpec = "WG",
) -> Component:
    """C geometry with ports on both ends.

    based on phidl.

    Args:
        width: of the line.
        size: length and height of the base.
        layer: layer spec.

    .. code::

          ______
         |       o1
         |   ___
         |  |
         |  |___
         ||<---> size[0]
         |______ o2
    """
    layer = gf.get_layer(layer)
    component = Component()
    half = width / 2
    length, height = size

    # Trace the C outline counter-clockwise, closing back at the start.
    outline = [
        (-half, -half),
        (length, -half),
        (length, half),
        (half, half),
        (half, height - half),
        (length, height - half),
        (length, height + half),
        (-half, height + half),
        (-half, -half),
    ]
    component.add_polygon(outline, layer=layer)

    # One port at the tip of each arm, both facing east.
    component.add_port(
        name="o1", center=(length, height), width=width, orientation=0, layer=layer
    )
    component.add_port(
        name="o2", center=(length, 0), width=width, orientation=0, layer=layer
    )
    return component
if __name__ == "__main__":
    # Quick visual check: build a C and show it in KLayout.
    c = C(width=1.0)
    c.show(show_ports=True)
| mit | 0a2ce9af2e1c6e24c20293946da09b15 | 19.491228 | 83 | 0.480308 | 3.002571 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/pdk.py | 1 | 19613 | """PDK stores layers, cross_sections, cell functions ..."""
import logging
import pathlib
import warnings
from functools import partial
from typing import Any, Callable, Optional
import numpy as np
from omegaconf import DictConfig
from pydantic import BaseModel, Field, validator
from gdsfactory.components import cells
from gdsfactory.config import PATH, sparameters_path
from gdsfactory.containers import containers as containers_default
from gdsfactory.cross_section import cross_sections
from gdsfactory.events import Event
from gdsfactory.layers import LAYER_COLORS, LayerColors
from gdsfactory.read.from_yaml import from_yaml
from gdsfactory.show import show
from gdsfactory.tech import LAYER, LAYER_STACK, LayerStack
from gdsfactory.types import (
CellSpec,
Component,
ComponentFactory,
ComponentSpec,
CrossSection,
CrossSectionFactory,
CrossSectionSpec,
Dict,
Layer,
LayerSpec,
PathType,
)
# Root logger: PDK registration events are logged here.
logger = logging.root

# Keys allowed in component / cross_section specs.
component_settings = ["function", "component", "settings"]
cross_section_settings = ["function", "cross_section", "settings"]
# Layer names every PDK must define (see Pdk.validate_layers).
layers_required = ["DEVREC", "PORT", "PORTE"]

# Default PDK constants (um).
constants = dict(
    fiber_array_spacing=127.0,
    fiber_spacing=50.0,
    fiber_input_to_output_spacing=200.0,
    metal_spacing=10.0,
)
class Pdk(BaseModel):
"""Store layers, cross_sections, cell functions, simulation_settings ...
only one Pdk can be active at a given time.
Parameters:
name: PDK name.
cross_sections: dict of cross_sections factories.
cells: dict of parametric cells that return Components.
containers: dict of pcells that contain other cells.
base_pdk: a pdk to copy from and extend.
default_decorator: decorate all cells, if not otherwise defined on the cell.
layers: maps name to gdslayer/datatype.
For example dict(si=(1, 0), sin=(34, 0)).
layer_stack: maps name to layer numbers, thickness, zmin, sidewall_angle.
if can also contain material properties
(refractive index, nonlinear coefficient, sheet resistance ...).
layer_colors: includes layer name to color, opacity and pattern.
sparameters_path: to store Sparameters simulations.
modes_path: to store Sparameters simulations.
interconnect_cml_path: path to interconnect CML (optional).
grid_size: in um. Defaults to 1nm.
warn_off_grid_ports: raises warning when extruding paths with offgrid ports.
For example, if you try to create a waveguide with 1.5nm length.
constants: dict of constants for the PDK.
"""
name: str
cross_sections: Dict[str, CrossSectionFactory] = Field(default_factory=dict)
cells: Dict[str, ComponentFactory] = Field(default_factory=dict)
containers: Dict[str, ComponentFactory] = containers_default
base_pdk: Optional["Pdk"] = None
default_decorator: Optional[Callable[[Component], None]] = None
layers: Dict[str, Layer] = Field(default_factory=dict)
layer_stack: Optional[LayerStack] = None
layer_colors: Optional[LayerColors] = None
sparameters_path: Optional[PathType] = None
modes_path: Optional[PathType] = PATH.modes
interconnect_cml_path: Optional[PathType] = None
grid_size: float = 0.001
warn_off_grid_ports: bool = False
constants: Dict[str, Any] = constants
    class Config:
        """Pydantic model configuration."""

        # Reject unknown keyword arguments instead of silently ignoring them.
        extra = "forbid"
        # Factories and callables are not serializable, so exclude them
        # when exporting the model (e.g. .dict() / .json()).
        fields = {
            "cross_sections": {"exclude": True},
            "cells": {"exclude": True},
            "containers": {"exclude": True},
            "default_decorator": {"exclude": True},
        }
@validator("sparameters_path")
def is_pathlib_path(cls, path):
return pathlib.Path(path)
def validate_layers(self):
for layer in layers_required:
if layer not in self.layers:
raise ValueError(
f"{layer!r} not in Pdk.layers {list(self.layers.keys())}"
)
def activate(self) -> None:
"""Set current pdk to as the active pdk."""
from gdsfactory.cell import clear_cache
clear_cache()
if self.base_pdk:
cross_sections = self.base_pdk.cross_sections
cross_sections.update(self.cross_sections)
self.cross_sections = cross_sections
cells = self.base_pdk.cells
cells.update(self.cells)
self.cells.update(cells)
containers = self.base_pdk.containers
containers.update(self.containers)
self.containers.update(containers)
layers = self.base_pdk.layers
layers.update(self.layers)
self.layers.update(layers)
if not self.default_decorator:
self.default_decorator = self.base_pdk.default_decorator
self.validate_layers()
_set_active_pdk(self)
def register_cells(self, **kwargs) -> None:
"""Register cell factories."""
for name, cell in kwargs.items():
if not callable(cell):
raise ValueError(
f"{cell} is not callable, make sure you register "
"cells functions that return a Component"
)
if name in self.cells:
warnings.warn(f"Overwriting cell {name!r}")
self.cells[name] = cell
on_cell_registered.fire(name=name, cell=cell, pdk=self)
def register_containers(self, **kwargs) -> None:
"""Register container factories."""
for name, cell in kwargs.items():
if not callable(cell):
raise ValueError(
f"{cell} is not callable, make sure you register "
"cells functions that return a Component"
)
if name in self.containers:
warnings.warn(f"Overwriting container {name!r}")
self.containers[name] = cell
on_container_registered.fire(name=name, cell=cell, pdk=self)
def register_cross_sections(self, **kwargs) -> None:
"""Register cross_sections factories."""
for name, cross_section in kwargs.items():
if not callable(cross_section):
raise ValueError(
f"{cross_section} is not callable, make sure you register "
"cross_section functions that return a CrossSection"
)
if name in self.cross_sections:
warnings.warn(f"Overwriting cross_section {name!r}")
self.cross_sections[name] = cross_section
on_cross_section_registered.fire(
name=name, cross_section=cross_section, pdk=self
)
    def register_cells_yaml(
        self,
        dirpath: Optional[PathType] = None,
        update: bool = False,
        **kwargs,
    ) -> None:
        """Load *.pic.yml YAML files and register them as cells.

        Args:
            dirpath: directory to recursively search for YAML cells.
            update: if True, do not raise ValueError when a cell name is
                already registered (the entry is overwritten instead).

        Keyword Args:
            cell_name: cell function. To update cells dict.
        """
        message = "Updated" if update else "Registered"
        if dirpath:
            dirpath = pathlib.Path(dirpath)
            if not dirpath.is_dir():
                raise ValueError(f"{dirpath!r} needs to be a directory.")
            for filepath in dirpath.glob("*/**/*.pic.yml"):
                # Cell name is the stem up to the first dot ("foo.pic.yml" -> "foo").
                name = filepath.stem.split(".")[0]
                if not update and name in self.cells:
                    raise ValueError(
                        f"ERROR: Cell name {name!r} from {filepath} already registered."
                    )
                # Lazy registration: the YAML is parsed only when the cell is built.
                self.cells[name] = partial(from_yaml, filepath)
                on_yaml_cell_registered.fire(name=name, cell=self.cells[name], pdk=self)
                logger.info(f"{message} cell {name!r}")
        for k, v in kwargs.items():
            if not update and k in self.cells:
                raise ValueError(f"ERROR: Cell name {k!r} already registered.")
            self.cells[k] = v
            logger.info(f"{message} cell {k!r}")
def remove_cell(self, name: str):
if name not in self.cells:
raise ValueError(f"{name!r} not in {list(self.cells.keys())}")
self.cells.pop(name)
logger.info(f"Removed cell {name!r}")
    def get_cell(self, cell: CellSpec, **kwargs) -> ComponentFactory:
        """Returns ComponentFactory from a cell spec.

        A cell spec can be a callable (returned as-is), a registered cell or
        container name, or a dict/DictConfig with a ``function`` key and
        optional ``settings``.
        """
        cells_and_containers = set(self.cells.keys()).union(set(self.containers.keys()))

        # Case 1: already a factory.
        if callable(cell):
            return cell
        # Case 2: registered name (cells take precedence over containers).
        elif isinstance(cell, str):
            if cell not in cells_and_containers:
                cells = list(self.cells.keys())
                containers = list(self.containers.keys())
                raise ValueError(
                    f"{cell!r} from PDK {self.name!r} not in cells: {cells} "
                    f"or containers: {containers}"
                )
            cell = self.cells[cell] if cell in self.cells else self.containers[cell]
            return cell
        # Case 3: dict spec {"function": name, "settings": {...}}.
        elif isinstance(cell, (dict, DictConfig)):
            for key in cell.keys():
                if key not in component_settings:
                    raise ValueError(
                        f"Invalid setting {key!r} not in {component_settings}"
                    )
            # kwargs override the spec's settings.
            settings = dict(cell.get("settings", {}))
            settings.update(**kwargs)

            cell_name = cell.get("function")
            if not isinstance(cell_name, str) or cell_name not in cells_and_containers:
                cells = list(self.cells.keys())
                containers = list(self.containers.keys())
                raise ValueError(
                    f"{cell_name!r} from PDK {self.name!r} not in cells: {cells} "
                    f"or containers: {containers}"
                )
            cell = (
                self.cells[cell_name]
                if cell_name in self.cells
                else self.containers[cell_name]
            )
            # Return a factory with the settings baked in.
            return partial(cell, **settings)
        else:
            raise ValueError(
                "get_cell expects a CellSpec (ComponentFactory, string or dict),"
                f"got {type(cell)}"
            )
    def get_component(self, component: ComponentSpec, **kwargs) -> Component:
        """Returns component from a component spec.

        A component spec can be a Component instance, a factory callable,
        a registered cell/container name, or a dict/DictConfig with a
        ``component``/``function`` key and optional ``settings``.
        """
        cells_and_containers = set(self.cells.keys()).union(set(self.containers.keys()))

        # Case 1: already instantiated — kwargs cannot be applied anymore.
        if isinstance(component, Component):
            if kwargs:
                raise ValueError(f"Cannot apply kwargs {kwargs} to {component.name!r}")
            return component
        # Case 2: a factory — call it.
        elif callable(component):
            return component(**kwargs)
        # Case 3: registered name (cells take precedence over containers).
        elif isinstance(component, str):
            if component not in cells_and_containers:
                cells = list(self.cells.keys())
                containers = list(self.containers.keys())
                raise ValueError(
                    f"{component!r} not in PDK {self.name!r} cells: {cells} "
                    f"or containers: {containers}"
                )
            cell = (
                self.cells[component]
                if component in self.cells
                else self.containers[component]
            )
            return cell(**kwargs)
        # Case 4: dict spec {"component"|"function": name, "settings": {...}}.
        elif isinstance(component, (dict, DictConfig)):
            for key in component.keys():
                if key not in component_settings:
                    raise ValueError(
                        f"Invalid setting {key!r} not in {component_settings}"
                    )
            # kwargs override the spec's settings.
            settings = dict(component.get("settings", {}))
            settings.update(**kwargs)

            # "component" key takes precedence; fall back to "function".
            cell_name = component.get("component", None)
            cell_name = cell_name or component.get("function")
            if not isinstance(cell_name, str) or cell_name not in cells_and_containers:
                cells = list(self.cells.keys())
                containers = list(self.containers.keys())
                raise ValueError(
                    f"{cell_name!r} from PDK {self.name!r} not in cells: {cells} "
                    f"or containers: {containers}"
                )
            cell = (
                self.cells[cell_name]
                if cell_name in self.cells
                else self.containers[cell_name]
            )
            component = cell(**settings)
            return component
        else:
            raise ValueError(
                "get_component expects a ComponentSpec (Component, ComponentFactory, "
                f"string or dict), got {type(component)}"
            )
    def get_cross_section(
        self, cross_section: CrossSectionSpec, **kwargs
    ) -> CrossSection:
        """Returns cross_section from a cross_section spec.

        A cross_section spec can be a CrossSection instance, a factory
        callable, a registered cross_section name, or a dict/DictConfig with
        a ``cross_section``/``function`` key and optional ``settings``.
        """
        # Case 1: already instantiated — kwargs cannot be applied anymore.
        if isinstance(cross_section, CrossSection):
            if kwargs:
                raise ValueError(f"Cannot apply {kwargs} to a defined CrossSection")
            return cross_section
        # Case 2: a factory — call it.
        elif callable(cross_section):
            return cross_section(**kwargs)
        # Case 3: registered name.
        elif isinstance(cross_section, str):
            if cross_section not in self.cross_sections:
                cross_sections = list(self.cross_sections.keys())
                raise ValueError(f"{cross_section!r} not in {cross_sections}")
            cross_section_factory = self.cross_sections[cross_section]
            return cross_section_factory(**kwargs)
        # Case 4: dict spec {"cross_section"|"function": name, "settings": {...}}.
        elif isinstance(cross_section, (dict, DictConfig)):
            for key in cross_section.keys():
                if key not in cross_section_settings:
                    raise ValueError(
                        f"Invalid setting {key!r} not in {cross_section_settings}"
                    )
            # "cross_section" key takes precedence; fall back to "function".
            cross_section_factory_name = cross_section.get("cross_section", None)
            cross_section_factory_name = (
                cross_section_factory_name or cross_section.get("function")
            )
            if (
                not isinstance(cross_section_factory_name, str)
                or cross_section_factory_name not in self.cross_sections
            ):
                cross_sections = list(self.cross_sections.keys())
                raise ValueError(
                    f"{cross_section_factory_name!r} not in {cross_sections}"
                )
            cross_section_factory = self.cross_sections[cross_section_factory_name]
            # kwargs override the spec's settings.
            settings = dict(cross_section.get("settings", {}))
            settings.update(**kwargs)

            return cross_section_factory(**settings)
        else:
            raise ValueError(
                "get_cross_section expects a CrossSectionSpec (CrossSection, "
                f"CrossSectionFactory, string or dict), got {type(cross_section)}"
            )
    def get_layer(self, layer: LayerSpec) -> Layer:
        """Returns layer from a layer spec.

        Accepts a (gds_layer, gds_datatype) tuple/list, a bare int (datatype
        defaults to 0), a registered layer name string, ``np.nan`` or ``None``.
        """
        if isinstance(layer, (tuple, list)):
            if len(layer) != 2:
                raise ValueError(f"{layer!r} needs two integer numbers.")
            # Returned unchanged (a list stays a list).
            return layer
        elif isinstance(layer, int):
            return (layer, 0)
        elif isinstance(layer, str):
            if layer not in self.layers:
                raise ValueError(f"{layer!r} not in {self.layers.keys()}")
            return self.layers[layer]
        elif layer is np.nan:
            # NOTE(review): identity check only matches the np.nan singleton;
            # float("nan") or math.nan fall through to the error below.
            return np.nan
        elif layer is None:
            return
        else:
            raise ValueError(
                f"{layer!r} needs to be a LayerSpec (string, int or Layer)"
            )
def get_layer_colors(self) -> LayerColors:
if self.layer_colors is None:
raise ValueError(f"layer_colors for Pdk {self.name!r} is None")
return self.layer_colors
def get_layer_stack(self) -> LayerStack:
if self.layer_stack is None:
raise ValueError(f"layer_stack for Pdk {self.name!r} is None")
return self.layer_stack
def get_constant(self, key: str) -> Any:
if not isinstance(key, str):
return key
if key not in self.constants:
constants = list(self.constants.keys())
raise ValueError(f"{key!r} not in {constants}")
return self.constants[key]
# _on_cell_registered = Event()
# _on_container_registered: Event = Event()
# _on_yaml_cell_registered: Event = Event()
# _on_cross_section_registered: Event = Event()
#
# @property
# def on_cell_registered(self) -> Event:
# return self._on_cell_registered
#
# @property
# def on_container_registered(self) -> Event:
# return self._on_container_registered
#
# @property
# def on_yaml_cell_registered(self) -> Event:
# return self._on_yaml_cell_registered
#
# @property
# def on_cross_section_registered(self) -> Event:
# return self._on_cross_section_registered
# Default generic PDK shipped with gdsfactory, built from the module-level
# registries imported above.
GENERIC = Pdk(
    name="generic",
    cross_sections=cross_sections,
    cells=cells,
    layers=LAYER.dict(),
    layer_stack=LAYER_STACK,
    layer_colors=LAYER_COLORS,
    sparameters_path=sparameters_path,
)
# Module-level active-PDK singleton; swap via _set_active_pdk() / Pdk.activate().
_ACTIVE_PDK = GENERIC
# Convenience module-level API: each function delegates to the active PDK.


def get_component(component: ComponentSpec, **kwargs) -> Component:
    """Returns a Component from the active PDK."""
    return _ACTIVE_PDK.get_component(component, **kwargs)


def get_cell(cell: CellSpec, **kwargs) -> ComponentFactory:
    """Returns a ComponentFactory from the active PDK."""
    return _ACTIVE_PDK.get_cell(cell, **kwargs)


def get_cross_section(cross_section: CrossSectionSpec, **kwargs) -> CrossSection:
    """Returns a CrossSection from the active PDK."""
    return _ACTIVE_PDK.get_cross_section(cross_section, **kwargs)


def get_layer(layer: LayerSpec) -> Layer:
    """Returns a Layer from the active PDK."""
    return _ACTIVE_PDK.get_layer(layer)


def get_layer_colors() -> LayerColors:
    """Returns the active PDK layer colors."""
    return _ACTIVE_PDK.get_layer_colors()


def get_layer_stack() -> LayerStack:
    """Returns the active PDK layer stack."""
    return _ACTIVE_PDK.get_layer_stack()


def get_active_pdk() -> Pdk:
    """Returns the active PDK singleton."""
    return _ACTIVE_PDK


def get_grid_size() -> float:
    """Returns the active PDK grid size."""
    return _ACTIVE_PDK.grid_size


def get_constant(constant_name: Any) -> Any:
    """If constant_name is a string, returns the value from the constants dict."""
    return _ACTIVE_PDK.get_constant(constant_name)


def get_sparameters_path() -> pathlib.Path:
    """Returns the active PDK sparameters path, raising if unset."""
    if _ACTIVE_PDK.sparameters_path is None:
        raise ValueError(f"{_ACTIVE_PDK.name!r} has no sparameters_path")
    return _ACTIVE_PDK.sparameters_path


def get_modes_path() -> Optional[pathlib.Path]:
    """Returns the active PDK modes path (may be None)."""
    return _ACTIVE_PDK.modes_path


def get_interconnect_cml_path() -> pathlib.Path:
    """Returns the active PDK interconnect CML path, raising if unset."""
    if _ACTIVE_PDK.interconnect_cml_path is None:
        raise ValueError(f"{_ACTIVE_PDK.name!r} has no interconnect_cml_path")
    return _ACTIVE_PDK.interconnect_cml_path


def _set_active_pdk(pdk: Pdk) -> None:
    # Swap the module-level singleton and notify subscribers.
    global _ACTIVE_PDK
    old_pdk = _ACTIVE_PDK
    _ACTIVE_PDK = pdk
    on_pdk_activated.fire(old_pdk=old_pdk, new_pdk=pdk)


# Module-level event hooks that plugins can subscribe to.
on_pdk_activated: Event = Event()
on_cell_registered: Event = Event()
on_container_registered: Event = Event()
on_yaml_cell_registered: Event = Event()
on_yaml_cell_modified: Event = Event()
on_cross_section_registered: Event = Event()

# Container and YAML registrations also count as cell registrations.
on_container_registered.add_handler(on_cell_registered.fire)
on_yaml_cell_registered.add_handler(on_cell_registered.fire)
# Re-show a YAML cell whenever its file is modified.
on_yaml_cell_modified.add_handler(show)
if __name__ == "__main__":
# c = _ACTIVE_PDK.get_component("straight")
# print(c.settings)
# on_pdk_activated += print
# set_active_pdk(GENERIC)
c = Pdk(
name="demo",
cells=cells,
cross_sections=cross_sections,
# layers=dict(DEVREC=(3, 0), PORTE=(3, 5)),
sparameters_path="/home",
)
print(c.layers)
| mit | e781e5cfb6c35ee92bef15669b12b243 | 35.728464 | 88 | 0.592056 | 4.110878 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/simulation/gmeep/get_simulation_grating_farfield.py | 1 | 15663 | """FIXME: needs some work.
- figure out get_farfield outputs
- add tutorial in docs/notebooks/plugins/meep/002_gratings.ipynb
- add filecache
- benchmark with lumerical and tidy3d
- add tests
"""
from typing import Any, Dict, Optional
import meep as mp
import numpy as np
from gdsfactory.types import Floats
nm = 1e-3
nSi = 3.48
nSiO2 = 1.44
def fiber_ncore(fiber_numerical_aperture, fiber_nclad):
    """Return the fiber core index from the NA and cladding index.

    Inverts ``NA**2 = ncore**2 - nclad**2``.
    """
    na_squared = fiber_numerical_aperture * fiber_numerical_aperture
    nclad_squared = fiber_nclad * fiber_nclad
    return (na_squared + nclad_squared) ** 0.5
def get_simulation_grating_farfield(
    period: float = 0.66,
    fill_factor: float = 0.5,
    n_periods: int = 30,
    widths: Optional[Floats] = None,
    gaps: Optional[Floats] = None,
    etch_depth: float = 70 * nm,
    fiber_angle_deg: float = 20.0,
    fiber_xposition: float = 1.0,
    fiber_core_diameter: float = 10.4,
    fiber_numerical_aperture: float = 0.14,
    fiber_nclad: float = nSiO2,
    ncore: float = nSi,
    nclad: float = nSiO2,
    nsubstrate: float = nSi,
    pml_thickness: float = 1,
    box_thickness: float = 2.0,
    clad_thickness: float = 2.0,
    core_thickness: float = 220 * nm,
    resolution: int = 64,  # pixels/um
    wavelength_min: float = 1.5,
    wavelength_max: float = 1.6,
    wavelength_points: int = 50,
) -> Dict[str, Any]:
    """Returns grating coupler far field simulation.

    The fiber core index is derived from the numerical aperture:
    ``ncore = sqrt(NA**2 + nclad**2)``.

    Args:
        period: fiber grating period.
        fill_factor: fraction of the grating period
            filled with the grating material.
        n_periods: number of periods.
        widths: Optional list of widths.
            Overrides period, fill_factor, n_periods.
        gaps: Optional list of gaps. Overrides period, fill_factor, n_periods.
        etch_depth: grating etch depth.
        fiber_angle_deg: fiber angle in degrees.
        fiber_xposition: xposition.
        fiber_core_diameter: fiber core diameter.
        fiber_numerical_aperture: fiber NA.
        fiber_nclad: fiber cladding index.
        ncore: waveguide core index.
        nclad: top cladding index.
        nsubstrate: substrate index.
        pml_thickness: pml_thickness (um).
        box_thickness: thickness for bottom cladding (um).
        clad_thickness: top cladding thickness (um).
        core_thickness: core_thickness (um).
        resolution: resolution pixels/um.
        wavelength_min: min wavelength (um).
        wavelength_max: max wavelength (um).
        wavelength_points: wavelength points.
    """
    wavelengths = np.linspace(wavelength_min, wavelength_max, wavelength_points)
    wavelength = np.mean(wavelengths)
    freqs = 1 / wavelengths

    # Explicit widths/gaps take precedence over the uniform grating spec.
    widths = widths or n_periods * [period * fill_factor]
    gaps = gaps or n_periods * [period * (1 - fill_factor)]

    settings = dict(
        period=period,
        fill_factor=fill_factor,
        fiber_angle_deg=fiber_angle_deg,
        fiber_xposition=fiber_xposition,
        fiber_core_diameter=fiber_core_diameter,
        # Bug fix: was recording fiber_core_diameter under this key.
        fiber_numerical_aperture=fiber_numerical_aperture,
        fiber_nclad=fiber_nclad,
        resolution=resolution,
        ncore=ncore,
        nclad=nclad,
        nsubstrate=nsubstrate,
        n_periods=n_periods,
        box_thickness=box_thickness,
        clad_thickness=clad_thickness,
        etch_depth=etch_depth,
        wavelength_min=wavelength_min,
        wavelength_max=wavelength_max,
        wavelength_points=wavelength_points,
        widths=widths,
        gaps=gaps,
    )
    length_grating = np.sum(widths) + np.sum(gaps)

    substrate_thickness = 1.0
    hair = 4
    core_material = mp.Medium(index=ncore)
    clad_material = mp.Medium(index=nclad)

    fiber_angle = np.radians(fiber_angle_deg)
    y_offset = 0
    x_offset = 0

    # Minimally-parametrized computational cell
    # Could be further optimized
    # X-domain
    dbufferx = 0.5
    if length_grating < 3 * fiber_core_diameter:
        sxy = 3 * fiber_core_diameter + 2 * dbufferx + 2 * pml_thickness
    else:  # Fiber probably to the left
        sxy = (
            3 / 2 * fiber_core_diameter
            + length_grating / 2
            + 2 * dbufferx
            + 2 * pml_thickness
        )

    # Useful reference points
    cell_edge_left = -sxy / 2 + dbufferx + pml_thickness
    grating_start = -fiber_xposition

    # Y-domain (using z notation from 3D legacy code)
    dbuffery = 0.5
    sz = (
        2 * dbuffery
        + box_thickness
        + core_thickness
        + hair
        + substrate_thickness
        + 2 * pml_thickness
    )

    # Initialize domain x-z plane simulation
    cell_size = mp.Vector3(sxy, sz)

    # Ports (position, sizes, directions)
    fiber_offset_from_angle = (clad_thickness + core_thickness) * np.tan(fiber_angle)
    fiber_port_center = mp.Vector3(
        (0.5 * sz - pml_thickness + y_offset - 1) * np.sin(fiber_angle)
        + cell_edge_left
        + 3 / 2 * fiber_core_diameter
        - fiber_offset_from_angle,
        0.5 * sz - pml_thickness + y_offset - 1,
    )
    fiber_port_size = mp.Vector3(3 * fiber_core_diameter, 0, 0)
    # fiber_port_direction = mp.Vector3(y=-1).rotate(mp.Vector3(z=1), -1 * fiber_angle)

    waveguide_port_center = mp.Vector3(-sxy / 4)
    waveguide_port_size = mp.Vector3(0, 2 * clad_thickness - 0.2)
    waveguide_port_direction = mp.X

    # Geometry
    fiber_clad = 120
    hfiber_geom = 100  # Some large number to make fiber extend into PML

    # Use the module-level helper (the original shadowed it with a local
    # variable of the same name).
    fiber_core_index = fiber_ncore(fiber_numerical_aperture, fiber_nclad)
    fiber_clad_material = mp.Medium(index=fiber_nclad)
    fiber_core_material = mp.Medium(index=fiber_core_index)
    # Tilted fiber cladding block (e1/e2 rotate the block by the fiber angle).
    geometry = [
        mp.Block(
            material=fiber_clad_material,
            center=mp.Vector3(
                x=grating_start + fiber_xposition - fiber_offset_from_angle
            ),
            size=mp.Vector3(fiber_clad, hfiber_geom),
            e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
            e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
        )
    ]
    # Tilted fiber core block.
    geometry.append(
        mp.Block(
            material=fiber_core_material,
            center=mp.Vector3(
                x=grating_start + fiber_xposition - fiber_offset_from_angle
            ),
            size=mp.Vector3(fiber_core_diameter, hfiber_geom),
            e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
            e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
        )
    )

    # clad
    geometry.append(
        mp.Block(
            material=clad_material,
            center=mp.Vector3(0, clad_thickness / 2),
            size=mp.Vector3(mp.inf, clad_thickness),
        )
    )

    # BOX
    geometry.append(
        mp.Block(
            material=clad_material,
            center=mp.Vector3(0, -0.5 * box_thickness),
            size=mp.Vector3(mp.inf, box_thickness),
        )
    )

    # waveguide
    geometry.append(
        mp.Block(
            material=core_material,
            center=mp.Vector3(0, core_thickness / 2),
            size=mp.Vector3(mp.inf, core_thickness),
        )
    )

    # grating etch: carve cladding-filled gaps into the top of the core.
    x = grating_start
    for width, gap in zip(widths, gaps):
        geometry.append(
            mp.Block(
                material=clad_material,
                center=mp.Vector3(x + gap / 2, core_thickness - etch_depth / 2),
                size=mp.Vector3(gap, etch_depth),
            )
        )
        x += width + gap

    # Substrate
    geometry.append(
        mp.Block(
            material=mp.Medium(index=nsubstrate),
            center=mp.Vector3(
                0,
                -0.5 * (core_thickness + substrate_thickness + pml_thickness + dbuffery)
                - box_thickness,
            ),
            size=mp.Vector3(mp.inf, substrate_thickness + pml_thickness + dbuffery),
        )
    )

    # PMLs
    boundary_layers = [mp.PML(pml_thickness)]

    # mode frequency
    fcen = 1 / wavelength

    # Waveguide source
    sources_directions = [mp.X]
    sources = [
        mp.EigenModeSource(
            src=mp.GaussianSource(fcen, fwidth=0.1 * fcen),
            size=waveguide_port_size,
            center=waveguide_port_center,
            eig_band=1,
            direction=sources_directions[0],
            eig_match_freq=True,
            eig_parity=mp.ODD_Z,
        )
    ]

    # Ports: monitors are offset slightly inward from the source/port planes.
    waveguide_monitor_port = mp.ModeRegion(
        center=waveguide_port_center + mp.Vector3(x=0.2), size=waveguide_port_size
    )
    fiber_monitor_port = mp.ModeRegion(
        center=fiber_port_center - mp.Vector3(y=0.2), size=fiber_port_size
    )

    sim = mp.Simulation(
        resolution=resolution,
        cell_size=cell_size,
        boundary_layers=boundary_layers,
        geometry=geometry,
        sources=sources,
        dimensions=2,
        eps_averaging=True,
    )

    # Near-to-far-field transform region just inside the top PML.
    offset_vector = mp.Vector3(x_offset, y_offset)
    nearfield = sim.add_near2far(
        fcen,
        0,
        1,
        mp.Near2FarRegion(
            mp.Vector3(x_offset, 0.5 * sz - pml_thickness + y_offset) - offset_vector,
            size=mp.Vector3(sxy - 2 * pml_thickness, 0),
        ),
    )
    waveguide_monitor = sim.add_mode_monitor(
        freqs, waveguide_monitor_port, yee_grid=True
    )
    fiber_monitor = sim.add_mode_monitor(freqs, fiber_monitor_port)
    field_monitor_point = (0, 0, 0)

    return dict(
        sim=sim,
        cell_size=cell_size,
        freqs=freqs,
        fcen=fcen,
        waveguide_monitor=waveguide_monitor,
        waveguide_port_direction=waveguide_port_direction,
        fiber_monitor=fiber_monitor,
        fiber_angle_deg=fiber_angle_deg,
        sources=sources,
        field_monitor_point=field_monitor_point,
        initialized=False,
        settings=settings,
        nearfield=nearfield,
    )
def get_farfield(wavelength: float = 1.55, **kwargs):
    """Run the grating simulation and return waveguide mode coefficients.

    Based on http://www.simpetus.com/projects.html#meep_outcoupler

    FIXME: the computed far-field angle/power arrays are currently
    discarded; decide on the output format and return them as well.

    Args:
        wavelength: center wavelength (um) used for the far-field radius.
        **kwargs: forwarded to get_simulation_grating_farfield.
    """
    sim_dict = get_simulation_grating_farfield(**kwargs)
    sim = sim_dict["sim"]
    sim.run(until=400)

    fcen = 1 / wavelength
    r = 1000 / fcen  # 1000 wavelengths out from the source
    npts = 1000  # number of points in [0,2*pi) range of angles

    # Bug fix: the near2far monitor lives in the returned dict, not on the
    # Simulation object (mp.Simulation is not subscriptable).
    nearfield = sim_dict["nearfield"]
    farfield_angles = []
    farfield_power = []
    for n in range(npts):
        angle = np.pi * (n / npts)
        ff = sim.get_farfield(
            nearfield,
            mp.Vector3(r * np.cos(angle), r * np.sin(angle)),
        )
        farfield_angles.append(np.angle(np.cos(angle) + 1j * np.sin(angle)))
        farfield_power.append(ff)

    farfield_angles = np.array(farfield_angles)
    farfield_power = np.array(farfield_power)
    return sim.get_eigenmode_coefficients(
        sim_dict["waveguide_monitor"], [1], eig_parity=mp.ODD_Z, direction=mp.X
    )
def get_port_1D_eigenmode(
    sim_dict,
    band_num: int = 1,
    fiber_angle_deg: float = 15,
):
    """Solve the 1D eigenmodes at the waveguide and fiber monitor ports.

    Args:
        sim_dict: simulation dict from get_simulation_grating_farfield.
        band_num: band number to solve for.
        fiber_angle_deg: fiber angle in degrees (rotates the fiber k-vector).

    Returns:
        Tuple (x_waveguide, ys_waveguide, eigenmode_waveguide,
        xs_fiber, y_fiber, eigenmode_fiber) compatible with the /modes plugin.
    """
    # Initialize
    sim = sim_dict["sim"]
    source = sim_dict["sources"][0]
    waveguide_monitor = sim_dict["waveguide_monitor"]
    fiber_monitor = sim_dict["fiber_monitor"]

    # Obtain source frequency
    fsrc = source.src.frequency

    # Obtain xsection
    center_fiber = fiber_monitor.regions[0].center
    size_fiber = fiber_monitor.regions[0].size
    center_waveguide = waveguide_monitor.regions[0].center
    size_waveguide = waveguide_monitor.regions[0].size

    # Solve for the modes (init once; flag is shared through sim_dict)
    if sim_dict["initialized"] is False:
        sim.init_sim()
        sim_dict["initialized"] = True

    # Waveguide
    eigenmode_waveguide = sim.get_eigenmode(
        direction=mp.X,
        where=mp.Volume(center=center_waveguide, size=size_waveguide),
        band_num=band_num,
        kpoint=mp.Vector3(
            fsrc * 3.48, 0, 0
        ),  # Hardcoded index for now, pull from simulation eventually
        frequency=fsrc,
    )
    # Sample positions across the waveguide port cross-section.
    ys_waveguide = np.linspace(
        center_waveguide.y - size_waveguide.y / 2,
        center_waveguide.y + size_waveguide.y / 2,
        int(sim.resolution * size_waveguide.y),
    )
    x_waveguide = center_waveguide.x

    # Fiber (k-vector rotated by the fiber angle)
    eigenmode_fiber = sim.get_eigenmode(
        direction=mp.NO_DIRECTION,
        where=mp.Volume(center=center_fiber, size=size_fiber),
        band_num=band_num,
        kpoint=mp.Vector3(0, fsrc * 1.45, 0).rotate(
            mp.Vector3(z=1), -1 * np.radians(fiber_angle_deg)
        ),  # Hardcoded index for now, pull from simulation eventually
        frequency=fsrc,
    )
    # Sample positions across the fiber port cross-section.
    xs_fiber = np.linspace(
        center_fiber.x - size_fiber.x / 2,
        center_fiber.x + size_fiber.x / 2,
        int(sim.resolution * size_fiber.x),
    )
    y_fiber = center_fiber.y

    return (
        x_waveguide,
        ys_waveguide,
        eigenmode_waveguide,
        xs_fiber,
        y_fiber,
        eigenmode_fiber,
    )
def plot(sim) -> None:
    """Plot the simulation permittivity in 2D with contour lines."""
    eps_params = {"contour": True}
    sim.plot2D(eps_parameters=eps_params)
    # plt.colorbar()
if __name__ == "__main__":
import matplotlib.pyplot as plt
sim_dict = get_simulation_grating_farfield(fiber_xposition=1, fiber_angle_deg=15)
# plot(sim_dict["sim"])
# plt.show()
# results = {}
# for angle in [10]:
# print(angle)
# (
# x_waveguide,
# ys_waveguide,
# eigenmode_waveguide,
# xs_fiber,
# y_fiber,
# eigenmode_fiber,
# ) = get_port_1D_eigenmode(sim_dict, band_num=1, fiber_angle_deg=angle)
# Ez_fiber = np.zeros(len(xs_fiber), dtype=np.complex128)
# for i in range(len(xs_fiber)):
# Ez_fiber[i] = eigenmode_fiber.amplitude(
# mp.Vector3(xs_fiber[i], y_fiber, 0), mp.Ez
# )
# plt.plot(xs_fiber, np.abs(Ez_fiber))
# plt.xlabel("x (um)")
# plt.ylabel("Ez (a.u.)")
# plt.savefig("fiber.png")
# # M1, E-field
# plt.figure(figsize=(10, 8), dpi=100)
# plt.suptitle(
# "MEEP get_eigenmode / MPB find_modes / Lumerical (manual)",
# y=1.05,
# fontsize=18,
# )
# plt.show()
wavelength = 1.55
settings = {}
sim_dict = get_simulation_grating_farfield(**settings)
sim = sim_dict["sim"]
sim.run(until=100)
# sim.run(until=400)
fcen = 1 / wavelength
r = 1000 / fcen # 1000 wavelengths out from the source
npts = 1000 # number of points in [0,2*pi) range of angles
farfield_angles = []
farfield_power = []
nearfield = sim["nearfield"]
for n in range(npts):
ff = sim.get_farfield(
nearfield,
mp.Vector3(r * np.cos(np.pi * (n / npts)), r * np.sin(np.pi * (n / npts))),
)
farfield_angles.append(
np.angle(np.cos(np.pi * (n / npts)) + 1j * np.sin(np.pi * (n / npts)))
)
farfield_power.append(ff)
farfield_angles = np.array(farfield_angles)
farfield_power = np.array(farfield_power)
# Waveguide
res_waveguide = sim.get_eigenmode_coefficients(
sim_dict["waveguide_monitor"], [1], eig_parity=mp.ODD_Z, direction=mp.X
)
print(res_waveguide)
plt.plot(farfield_power)
plt.show()
| mit | cc053f4807b4eb5a790496735d8e1968 | 28.608696 | 88 | 0.588265 | 3.249585 | false | false | false | false |
davidsandberg/facenet | tmp/mnist_noise_labels.py | 1 | 15432 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
from six.moves import urllib # @UnresolvedImport
import tensorflow as tf
import numpy as np
from six.moves import xrange
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000  # Size of the validation set.
SEED = 66478  # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100  # Number of steps between evaluations.
# Fraction of training labels that are replaced with random wrong labels.
NOISE_FACTOR = 0.2
# Bootstrapped loss weight: BETA on the observed (noisy) label, (1 - BETA)
# on the model's own argmax prediction.
BETA = 0.8


tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            "Use half floats instead of full floats if True.")
FLAGS = tf.app.flags.FLAGS
def data_type():
    """Return the type of the activations, weights, and placeholder variables."""
    return tf.float16 if FLAGS.use_fp16 else tf.float32
def maybe_download(filename):
    """Download the data from Yann's website, unless it's already here.

    Returns the local file path inside WORK_DIRECTORY.
    """
    if not tf.gfile.Exists(WORK_DIRECTORY):
        tf.gfile.MakeDirs(WORK_DIRECTORY)
    filepath = os.path.join(WORK_DIRECTORY, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    with tf.gfile.GFile(filepath) as f:
        size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
def extract_data(filename, num_images):
    """Extract the images into a 4D tensor [image index, y, x, channels].

    Values are rescaled from [0, 255] down to [-0.5, 0.5].
    """
    print('Extracting', filename)
    num_bytes = IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the idx3 file header
        raw = stream.read(num_bytes)
    pixels = np.frombuffer(raw, dtype=np.uint8).astype(np.float32)
    pixels = (pixels - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    return pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
def extract_labels(filename, num_images):
    """Extract the labels into a vector of int64 label IDs."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the idx1 file header
        raw = stream.read(1 * num_images)
    return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
def fake_data(num_images):
    """Generate a fake dataset that matches the dimensions of MNIST."""
    shape = (num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
    data = np.ndarray(shape=shape, dtype=np.float32)
    labels = np.zeros(shape=(num_images,), dtype=np.int64)
    for idx in range(num_images):
        parity = idx % 2
        data[idx, :, :, 0] = parity - 0.5
        labels[idx] = parity
    return data, labels
def error_rate(predictions, labels):
    """Return the percentage of samples whose argmax prediction is wrong."""
    predicted_classes = np.argmax(predictions, 1)
    num_correct = np.sum(predicted_classes == labels)
    accuracy_pct = 100.0 * num_correct / predictions.shape[0]
    return 100.0 - accuracy_pct
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
nrof_training_examples = train_labels.shape[0]
nrof_changed_labels = int(nrof_training_examples*NOISE_FACTOR)
shuf = np.arange(0,nrof_training_examples)
np.random.shuffle(shuf)
change_idx = shuf[0:nrof_changed_labels]
train_labels[change_idx] = (train_labels[change_idx] + np.random.randint(1,9,size=(nrof_changed_labels,))) % NUM_LABELS
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(),
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(),
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.global_variables_initializer().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED, dtype=data_type()))
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(tf.truncated_normal(
[5, 5, 32, 64], stddev=0.1,
seed=SEED, dtype=data_type()))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc2_biases = tf.Variable(tf.constant(
0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list() #pylint: disable=no-member
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
# t: observed noisy labels
# q: estimated class probabilities (output from softmax)
# z: argmax of q
t = tf.one_hot(train_labels_node, NUM_LABELS)
q = tf.nn.softmax(logits)
qqq = tf.arg_max(q, dimension=1)
z = tf.one_hot(qqq, NUM_LABELS)
#cross_entropy = -tf.reduce_sum(t*tf.log(q),reduction_indices=1)
cross_entropy = -tf.reduce_sum((BETA*t+(1-BETA)*z)*tf.log(q),reduction_indices=1)
loss = tf.reduce_mean(cross_entropy)
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
# logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
  def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches."""
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        predictions[begin:end, :] = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[begin:end, ...]})
      else:
        # The final slice is smaller than EVAL_BATCH_SIZE but the graph only
        # accepts fixed-size batches, so run the last EVAL_BATCH_SIZE rows of
        # the data and keep only the predictions for the not-yet-covered tail
        # (the negative index begin - size selects exactly those rows).
        batch_predictions = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions
  # Create a local session to run the training.
  start_time = time.time()
  with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.global_variables_initializer().run() #pylint: disable=no-member
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the graph and fetch some of the nodes.
      _, l, lr, predictions = sess.run(
          [optimizer, loss, learning_rate, train_prediction],
          feed_dict=feed_dict)
      # Periodically report timing, loss and error rates on the current
      # minibatch and the validation set.
      if step % EVAL_FREQUENCY == 0:
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print('Step %d (epoch %.2f), %.1f ms' %
              (step, float(step) * BATCH_SIZE / train_size,
               1000 * elapsed_time / EVAL_FREQUENCY))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' % error_rate(
            eval_in_batches(validation_data, sess), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)
if __name__ == '__main__':
  # tf.app.run() parses command-line flags and then invokes the module's
  # main() entry point.
  tf.app.run()
| mit | bf27d8d7eb9106dda88a15b6aa8fb73c | 43.472622 | 127 | 0.601801 | 3.779574 | false | true | false | false |
jgorset/facepy | facepy/graph_api.py | 1 | 16285 | try:
import simplejson as json
except ImportError:
import json # flake8: noqa
import requests
import hashlib
import hmac
import logging
try:
import urllib.parse as urlparse
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import urlparse
from decimal import Decimal
import six
from facepy.exceptions import *
log = logging.getLogger(__name__)
class GraphAPI(object):
    """Thin client for Facebook's Graph API.

    Wraps a ``requests`` session and exposes get/post/delete/search/batch
    plus pagination, retry and error-translation logic (errors are raised
    as the exception types from ``facepy.exceptions``).
    """
    def __init__(self, oauth_token=False, url='https://graph.facebook.com', verify_ssl_certificate=True, appsecret=False, timeout=None, version=None):
        """
        Initialize GraphAPI with an OAuth access token.

        :param oauth_token: A string describing an OAuth access token.
        :param url: Base URL of the Graph API endpoint (trailing slashes are stripped).
        :param verify_ssl_certificate: Whether to verify SSL certificates on requests.
        :param appsecret: The application secret; when set (together with
                          ``oauth_token``), an ``appsecret_proof`` is added to every request.
        :param timeout: Per-request timeout passed to ``requests``.
        :param version: A string with version ex. '2.2'.
        """
        self.oauth_token = oauth_token
        self.session = requests.session()
        self.url = url.strip('/')
        self.verify_ssl_certificate = verify_ssl_certificate
        self.appsecret = appsecret
        self.timeout = timeout
        self.version = version
    @classmethod
    def for_application(self, id, secret_key, api_version=None):
        """
        Initialize GraphAPI with an OAuth access token for an application.

        :param id: An integer describing a Facebook application.
        :param secret_key: A String describing the Facebook application's secret key.
        """
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; it is kept as `self` here to avoid touching the code.
        from facepy.utils import get_application_access_token
        access_token = get_application_access_token(id, secret_key, api_version=api_version)
        return GraphAPI(access_token, version=api_version)
    def get(self, path='', page=False, retry=3, **options):
        """
        Get an item from the Graph API.

        :param path: A string describing the path to the item.
        :param page: A boolean describing whether to return a generator that
                     iterates over each page of results.
        :param retry: An integer describing how many times the request may be retried.
        :param options: Graph API parameters such as 'limit', 'offset' or 'since'.
        Floating-point numbers will be returned as :class:`decimal.Decimal`
        instances.
        See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
        for an exhaustive list of parameters.
        """
        response = self._query(
            method='GET',
            path=path,
            data=options,
            page=page,
            retry=retry
        )
        if response is False:
            raise FacebookError('Could not get "%s".' % path)
        return response
    def post(self, path='', retry=0, **data):
        """
        Post an item to the Graph API.

        :param path: A string describing the path to the item.
        :param retry: An integer describing how many times the request may be retried.
        :param data: Graph API parameters such as 'message' or 'source'.
        See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
        for an exhaustive list of options.
        """
        response = self._query(
            method='POST',
            path=path,
            data=data,
            retry=retry
        )
        if response is False:
            raise FacebookError('Could not post to "%s"' % path)
        return response
    def delete(self, path, retry=3, **data):
        """
        Delete an item in the Graph API.

        :param path: A string describing the path to the item.
        :param retry: An integer describing how many times the request may be retried.
        :param data: Graph API parameters such as 'main_page_id' or 'location_page_id'.
        See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
        for an exhaustive list of parameters.
        """
        response = self._query(
            method='DELETE',
            path=path,
            data=data,
            retry=retry
        )
        if response is False:
            raise FacebookError('Could not delete "%s"' % path)
        return response
    def search(self, term, type='place', page=False, retry=3, **options):
        """
        Search for an item in the Graph API.

        :param term: A string describing the search term.
        :param type: A string describing the type of items to search for.
        :param page: A boolean describing whether to return a generator that
                     iterates over each page of results.
        :param retry: An integer describing how many times the request may be retried.
        :param options: Graph API parameters, such as 'center' and 'distance'.
        Supported types are only ``place`` since Graph API 2.0.
        See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
        for an exhaustive list of options.
        """
        if type != 'place':
            raise ValueError('Unsupported type "%s". The only supported type is "place" since Graph API 2.0.' % type)
        options = dict({
            'q': term,
            'type': type,
        }, **options)
        response = self._query('GET', 'search', options, page, retry)
        return response
    def batch(self, requests):
        """
        Make a batch request.

        :param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
        Yields a list of responses and/or exceptions.
        """
        for request in requests:
            if 'body' in request:
                request['body'] = urlencode(request['body'])
        def _grouper(complete_list, n=1):
            """
            Batches a list into constant size chunks.

            :param complete_list: An input list (not a generator).
            :param n: The size of the chunk.
            Adapted from <http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python>
            """
            for i in range(0, len(complete_list), n):
                yield complete_list[i:i + n]
        responses = []
        # Maximum batch size for Facebook is 50 so split up requests
        # https://developers.facebook.com/docs/graph-api/making-multiple-requests/#limits
        for group in _grouper(requests, 50):
            responses += self.post(
                batch=json.dumps(group)
            )
        for response, request in zip(responses, requests):
            # Facilitate for empty Graph API responses.
            #
            # https://github.com/jgorset/facepy/pull/30
            if not response:
                yield None
                continue
            try:
                yield self._parse(response['body'])
            except FacepyError as exception:
                # Attach the originating request so callers can tell which
                # batch item failed, then yield the exception in place.
                exception.request = request
                yield exception
    def _query(self, method, path, data=None, page=False, retry=0):
        """
        Fetch an object from the Graph API and parse the output. Returns the
        parsed result, or (when ``page`` is True) a generator that yields one
        parsed result per page until results have been exhausted.

        :param method: A string describing the HTTP method.
        :param path: A string describing the object in the Graph API.
        :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests).
        :param page: A boolean describing whether to return an iterator that iterates over each page of results.
        :param retry: An integer describing how many times the request may be retried.
        """
        # Parameter names may encode characters that are invalid in Python
        # identifiers: '_sqbro_' -> '[', '_sqbrc_' -> ']' and '__' -> ':'.
        # Decode them before sending the request.
        if(data):
            data = dict(
                (k.replace('_sqbro_', '['), v) for k, v in data.items())
            data = dict(
                (k.replace('_sqbrc_', ']'), v) for k, v in data.items())
            data = dict(
                (k.replace('__', ':'), v) for k, v in data.items())
        data = data or {}
        def load(method, url, data):
            # Perform a single HTTP request and return (result, next_page_url).
            for key in data:
                value = data[key]
                if isinstance(value, (list, dict, set)):
                    data[key] = json.dumps(value)
            try:
                if method in ['GET', 'DELETE']:
                    response = self.session.request(
                        method, url, params=data, allow_redirects=True,
                        verify=self.verify_ssl_certificate, timeout=self.timeout
                    )
                if method in ['POST', 'PUT']:
                    files = {}
                    for key in data:
                        if hasattr(data[key], 'read'):
                            files[key] = data[key]
                    for key in files:
                        data.pop(key)
                    response = self.session.request(
                        method, url, data=data, files=files,
                        verify=self.verify_ssl_certificate, timeout=self.timeout
                    )
                if 500 <= response.status_code < 600:
                    # Facebook 5XX errors usually come with helpful messages
                    # as a JSON object describing the problem with the request.
                    # If this is the case, an error will be raised and we just
                    # need to re-raise it. This is most likely to happen
                    # with the Ads API.
                    # This will raise an exception if a JSON-like error object
                    # comes in the response.
                    self._parse(response.content)
                    # If Facebook does not provide any JSON-formatted error
                    # but just a plain-text, useless error, we'll just inform
                    # about a Facebook Internal errror occurred.
                    raise FacebookError(
                        'Internal Facebook error occurred',
                        response.status_code
                    )
            except requests.RequestException as exception:
                raise HTTPError(exception)
            result = self._parse(response.content)
            if isinstance(result, dict):
                result['headers'] = response.headers
            def nested_get(needle, haystack):
                """
                Get the given key anywhere in a nested dictionary.
                """
                if needle in haystack:
                    return haystack[needle]
                for key, value in haystack.items():
                    if isinstance(value, dict):
                        item = nested_get(needle, value)
                        if item is not None:
                            return item
            if isinstance(result, dict):
                paging = nested_get('paging', result)
                if paging:
                    next_url = paging.get('next', None)
                else:
                    next_url = None
            else:
                next_url = None
            return result, next_url
        def load_with_retry(method, url, data):
            # Retry `load` up to `retry` times on any FacepyError.
            remaining_retries = retry
            while True:
                try:
                    return load(method, url, data)
                except FacepyError as e:
                    log.warn("Exception on %s: %s, retries remaining: %s",
                             url,
                             e,
                             remaining_retries,
                             )
                    if remaining_retries > 0:
                        remaining_retries -= 1
                    else:
                        raise
        def paginate(method, url, data):
            # Follow 'next' page URLs until they run out, yielding each page.
            while url:
                result, url = load_with_retry(method, url, data)
                # Reset pagination parameters.
                for key in ['offset', 'until', 'since']:
                    if key in data:
                        del data[key]
                yield result
        # Convert option lists to comma-separated values.
        for key in data:
            if isinstance(data[key], (list, set, tuple)) and all([isinstance(item, six.string_types) for item in data[key]]):
                data[key] = ','.join(data[key])
        # Support absolute paths too
        if not path.startswith('/'):
            if six.PY2:
                path = '/' + six.text_type(path.decode('utf-8'))
            else:
                path = '/' + path
        url = self._get_url(path)
        if self.oauth_token:
            data['access_token'] = self.oauth_token
        if self.appsecret and self.oauth_token:
            data['appsecret_proof'] = self._generate_appsecret_proof()
        if page:
            return paginate(method, url, data)
        else:
            return load_with_retry(method, url, data)[0]
    def _get_url(self, path):
        # When Facebook returns nested resources (like comments for a post), it
        # prepends 'https://graph.facebook.com' by itself and so we must take
        # care not to prepend it again.
        if urlparse.urlparse(path).netloc == '':
            url = self.url
        else:
            url = ''
        if self.version:
            url = '%s/v%s%s' % (url, self.version, path)
        else:
            url = '%s%s' % (url, path)
        return url
    def _get_error_params(self, error_obj):
        # Extract the standard Graph API error fields (missing ones become None).
        error_params = {}
        error_fields = ['message', 'code', 'error_subcode', 'error_user_msg',
                        'is_transient', 'error_data', 'error_user_title',
                        'fbtrace_id']
        if 'error' in error_obj:
            error_obj = error_obj['error']
        for field in error_fields:
            error_params[field] = error_obj.get(field)
        return error_params
    def _parse(self, data):
        """
        Parse the response from Facebook's Graph API.

        :param data: A string describing the Graph API's response.
        """
        if type(data) == type(bytes()):
            try:
                data = data.decode('utf-8')
            except UnicodeDecodeError:
                return data
        try:
            data = json.loads(data, parse_float=Decimal)
        except ValueError:
            return data
        # Facebook's Graph API sometimes responds with 'true' or 'false'. Facebook offers no documentation
        # as to the prerequisites for this type of response, though it seems that it responds with 'true'
        # when objects are successfully deleted and 'false' upon attempting to delete or access an item that
        # one does not have access to.
        #
        # For example, the API would respond with 'false' upon attempting to query a feed item without having
        # the 'read_stream' extended permission. If you were to query the entire feed, however, it would respond
        # with an empty list instead.
        #
        # Genius.
        #
        # We'll handle this discrepancy as gracefully as we can by implementing logic to deal with this behavior
        # in the high-level access functions (get, post, delete etc.).
        if type(data) is dict:
            if 'error' in data:
                error = data['error']
                if error.get('type') == "OAuthException":
                    exception = OAuthError
                else:
                    exception = FacebookError
                raise exception(**self._get_error_params(data))
            # Facebook occasionally reports errors in its legacy error format.
            if 'error_msg' in data:
                raise FacebookError(**self._get_error_params(data))
        return data
    def _generate_appsecret_proof(self):
        """
        Returns the HMAC-SHA256 of the oauth_token keyed by appsecret.

        https://developers.facebook.com/docs/graph-api/securing-requests/
        """
        if six.PY2:
            key = self.appsecret
            message = self.oauth_token
        else:
            key = bytes(self.appsecret, 'utf-8')
            message = bytes(self.oauth_token, 'utf-8')
        return hmac.new(key, message, hashlib.sha256).hexdigest()
# Proxy exceptions for ease of use and backwards compatibility.
# (The names are re-exported unchanged so `from facepy.graph_api import
# FacebookError` keeps working; definitions live in facepy.exceptions.)
FacebookError, OAuthError, HTTPError = FacebookError, OAuthError, HTTPError
| mit | 07595ae61f74a0a3d0fefe06d60151d3 | 35.188889 | 150 | 0.55198 | 4.635639 | false | false | false | false |
davidsandberg/facenet | src/facenet.py | 3 | 23366 | """Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin added to the positive/negative distance difference
        before clamping at zero.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        # Squared Euclidean distances between anchor-positive and anchor-negative.
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
    return loss
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)

    Args:
      features: 2-D tensor of embeddings, one row per example.
      label: 1-D tensor of class indices, one per row of `features`.
      alfa: center update rate; each referenced center is shifted by
        (1 - alfa) * (center - feature).
      nrof_classes: total number of classes (rows of the centers variable).

    Returns:
      A (loss, centers) tuple: the mean squared distance of each feature to
      its class center, and the (updated) centers variable.
    """
    nrof_features = features.get_shape()[1]
    # Non-trainable running centers, updated manually via scatter_sub below.
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    # Ensure the center update runs before the loss is evaluated.
    with tf.control_dependencies([centers]):
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
def get_image_paths_and_labels(dataset):
    """Flatten a dataset into parallel lists of image paths and class labels.

    Args:
        dataset: Sequence of objects exposing an `image_paths` list
            (e.g. ImageClass instances).

    Returns:
        A (paths, labels) pair where `labels[i]` is the index of the class
        that `paths[i]` belongs to.
    """
    image_paths_flat = []
    labels_flat = []
    for class_index, image_class in enumerate(dataset):
        image_paths_flat.extend(image_class.image_paths)
        labels_flat.extend([class_index] * len(image_class.image_paths))
    return image_paths_flat, labels_flat
def shuffle_examples(image_paths, labels):
    """Shuffle paths and labels together, keeping each (path, label) pair intact.

    Returns:
        A pair of tuples (shuffled_paths, shuffled_labels).
    """
    paired = list(zip(image_paths, labels))
    random.shuffle(paired)
    shuffled_paths, shuffled_labels = zip(*paired)
    return shuffled_paths, shuffled_labels
def random_rotate_image(image):
    """Rotate `image` by a uniformly random angle between -10 and 10 degrees
    using bicubic interpolation."""
    # NOTE(review): scipy.misc.imrotate was deprecated and removed in
    # SciPy >= 1.2 -- confirm the pinned SciPy version or port to another
    # library before upgrading.
    angle = np.random.uniform(low=-10.0, high=10.0)
    return misc.imrotate(image, angle, 'bicubic')
# 1: Random rotate 2: Random crop  4: Random flip  8: Fixed image standardization  16: Flip
# Bit flags combined into the per-example "control" value consumed by
# create_input_pipeline; each power of two toggles one preprocessing step.
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder):
    """Build the multi-threaded image loading and augmentation pipeline.

    Dequeues (filenames, label, control) tuples from `input_queue` on
    `nrof_preprocess_threads` threads, decodes each image and applies the
    augmentations selected by the bit flags in `control` (RANDOM_ROTATE,
    RANDOM_CROP, RANDOM_FLIP, FIXED_STANDARDIZATION, FLIP), then joins the
    results into fixed-shape batches via tf.train.batch_join.

    Args:
        input_queue: Queue yielding (filenames, label, control) tuples.
        image_size: (height, width) tuple of the output image size.
        nrof_preprocess_threads: Number of parallel preprocessing threads.
        batch_size_placeholder: Scalar tensor/placeholder with the batch size.

    Returns:
        An (image_batch, label_batch) pair of tensors.
    """
    images_and_labels_list = []
    for _ in range(nrof_preprocess_threads):
        filenames, label, control = input_queue.dequeue()
        images = []
        for filename in tf.unstack(filenames):
            file_contents = tf.read_file(filename)
            image = tf.image.decode_image(file_contents, 3)
            # Each tf.cond applies its augmentation only when the matching
            # bit is set in the per-example control value.
            image = tf.cond(get_control_flag(control[0], RANDOM_ROTATE),
                            lambda:tf.py_func(random_rotate_image, [image], tf.uint8),
                            lambda:tf.identity(image))
            image = tf.cond(get_control_flag(control[0], RANDOM_CROP),
                            lambda:tf.random_crop(image, image_size + (3,)),
                            lambda:tf.image.resize_image_with_crop_or_pad(image, image_size[0], image_size[1]))
            image = tf.cond(get_control_flag(control[0], RANDOM_FLIP),
                            lambda:tf.image.random_flip_left_right(image),
                            lambda:tf.identity(image))
            image = tf.cond(get_control_flag(control[0], FIXED_STANDARDIZATION),
                            lambda:(tf.cast(image, tf.float32) - 127.5)/128.0,
                            lambda:tf.image.per_image_standardization(image))
            image = tf.cond(get_control_flag(control[0], FLIP),
                            lambda:tf.image.flip_left_right(image),
                            lambda:tf.identity(image))
            #pylint: disable=no-member
            image.set_shape(image_size + (3,))
            images.append(image)
        images_and_labels_list.append([images, label])
    image_batch, label_batch = tf.train.batch_join(
        images_and_labels_list, batch_size=batch_size_placeholder,
        shapes=[image_size + (3,), ()], enqueue_many=True,
        capacity=4 * nrof_preprocess_threads * 100,
        allow_smaller_final_batch=True)
    return image_batch, label_batch
def get_control_flag(control, field):
    # True when the bit corresponding to `field` (a power of two) is set in
    # `control`; implemented as (control // field) mod 2 == 1 with TF ops.
    return tf.equal(tf.mod(tf.floor_div(control, field), 2), 1)
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    # Individual losses are whatever has been added to the 'losses' collection.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name +' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
    """Create the training op: optimizer step, moving averages and summaries.

    Args:
        total_loss: Scalar loss tensor to minimize.
        global_step: Variable incremented once per optimizer step.
        optimizer: One of 'ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP' or 'MOM'.
        learning_rate: Scalar learning-rate tensor.
        moving_average_decay: Decay used for the trainable-variable averages.
        update_gradient_vars: Variables to compute and apply gradients for.
        log_histograms: If True, add histogram summaries for variables and
            gradients.

    Returns:
        The op to run for one training step.

    Raises:
        ValueError: If `optimizer` is not one of the supported names.
    """
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        if optimizer=='ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer=='ADADELTA':
            opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')
        grads = opt.compute_gradients(total_loss, update_gradient_vars)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # The train op fires the gradient application and the average updates.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def prewhiten(x):
    """Normalize an image array to zero mean and (clamped) unit deviation.

    The standard deviation is clamped below by 1/sqrt(x.size) so constant
    images do not cause a division blow-up.
    """
    mean = np.mean(x)
    std_adj = np.maximum(np.std(x), 1.0 / np.sqrt(x.size))
    return (x - mean) / std_adj
def crop(image, random_crop, image_size):
    """Take a centered (or randomly jittered) square crop from `image`.

    Images no wider than `image_size` are returned unchanged.  The crop side
    is 2 * (image_size // 2), i.e. image_size for even sizes.
    """
    if image.shape[1] > image_size:
        half_src = int(image.shape[1] // 2)
        half_dst = int(image_size // 2)
        if random_crop:
            max_shift = half_src - half_dst
            h = np.random.randint(-max_shift, max_shift + 1)
            v = np.random.randint(-max_shift, max_shift + 1)
        else:
            h = v = 0
        top = half_src - half_dst + v
        left = half_src - half_dst + h
        image = image[top:top + 2 * half_dst, left:left + 2 * half_dst, :]
    return image
def flip(image, random_flip):
    """Horizontally flip `image` with probability 0.5 when `random_flip` is set."""
    if random_flip and np.random.choice([True, False]):
        return np.fliplr(image)
    return image
def to_rgb(img):
    """Replicate a 2-D grayscale image into a (w, h, 3) uint8 RGB image."""
    w, h = img.shape
    rgb = np.empty((w, h, 3), dtype=np.uint8)
    for channel in range(3):
        rgb[:, :, channel] = img
    return rgb
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
    """Load images from disk into an (n, image_size, image_size, 3) array.

    Grayscale images are expanded to RGB; each image is optionally
    prewhitened, cropped (centered or random) and randomly flipped.

    NOTE(review): relies on scipy.misc.imread, which was removed in
    SciPy >= 1.2 -- confirm the pinned SciPy version or migrate to imageio.
    """
    nrof_samples = len(image_paths)
    images = np.zeros((nrof_samples, image_size, image_size, 3))
    for i in range(nrof_samples):
        img = misc.imread(image_paths[i])
        if img.ndim == 2:
            img = to_rgb(img)
        if do_prewhiten:
            img = prewhiten(img)
        img = crop(img, do_random_crop, image_size)
        img = flip(img, do_random_flip)
        images[i,:,:,:] = img
    return images
def get_label_batch(label_data, batch_size, batch_index):
    """Return batch `batch_index` of labels, wrapping around the data end.

    Args:
        label_data: 1-D array of integer labels.
        batch_size: Number of labels per batch.
        batch_index: Index of the batch to extract.

    Returns:
        A 1-D int64 numpy array of exactly `batch_size` labels.
    """
    nrof_examples = np.size(label_data, 0)
    j = batch_index * batch_size % nrof_examples
    if j + batch_size <= nrof_examples:
        batch = label_data[j:j + batch_size]
    else:
        # Wrap around: take the tail plus just enough labels from the head to
        # fill the batch.  Bug fix: the original sliced label_data[0:n-j]
        # (wrong element count) and used np.vstack, which stacks 1-D label
        # arrays into a 2-D result instead of concatenating them.
        x1 = label_data[j:nrof_examples]
        x2 = label_data[0:j + batch_size - nrof_examples]
        batch = np.concatenate([x1, x2])
    return batch.astype(np.int64)
def get_batch(image_data, batch_size, batch_index):
    """Return batch `batch_index` of images, wrapping around the data end.

    Args:
        image_data: 4-D array of images (n, h, w, c).
        batch_size: Number of images per batch.
        batch_index: Index of the batch to extract.

    Returns:
        A float32 array of exactly `batch_size` images.
    """
    nrof_examples = np.size(image_data, 0)
    j = batch_index * batch_size % nrof_examples
    if j + batch_size <= nrof_examples:
        batch = image_data[j:j + batch_size, :, :, :]
    else:
        # Wrap around: tail of the data plus just enough rows from the head
        # to fill the batch.  Bug fix: the original took
        # image_data[0:nrof_examples-j], which yields the wrong number of
        # rows whenever batch_size != 2 * (nrof_examples - j).
        x1 = image_data[j:nrof_examples, :, :, :]
        x2 = image_data[0:j + batch_size - nrof_examples, :, :, :]
        batch = np.vstack([x1, x2])
    return batch.astype(np.float32)
def get_triplet_batch(triplets, batch_index, batch_size):
    """Assemble a stacked (anchor, positive, negative) batch.

    One third of `batch_size` is drawn from each of the anchor, positive and
    negative arrays and the three sub-batches are stacked in that order.
    """
    anchors, positives, negatives = triplets
    third = int(batch_size / 3)
    a = get_batch(anchors, third, batch_index)
    p = get_batch(positives, third, batch_index)
    n = get_batch(negatives, third, batch_index)
    return np.vstack([a, p, n])
def get_learning_rate_from_file(filename, epoch):
    """Look up the learning rate for `epoch` in a schedule file.

    Each non-comment line has the form '<epoch>: <learning rate>'; a rate of
    '-' is stored as -1.  The rate returned is the one from the last entry
    whose epoch is <= `epoch`, or None if no entry applies.
    """
    learning_rate = None
    with open(filename, 'r') as f:
        for line in f.readlines():
            # Strip inline comments; skip blank lines (the original crashed
            # on blank lines with int('')).
            line = line.split('#', 1)[0].strip()
            if line:
                par = line.split(':')
                e = int(par[0])
                if par[1] == '-':
                    lr = -1
                else:
                    lr = float(par[1])
                if e <= epoch:
                    learning_rate = lr
                else:
                    return learning_rate
    # Bug fix: the original fell off the end of the loop (returning None)
    # whenever every schedule entry applied; return the last applicable rate.
    return learning_rate
class ImageClass():
    """Stores the paths to images for a given class."""

    def __init__(self, name, image_paths):
        # Human-readable class name (typically the directory name).
        self.name = name
        # List of file paths for this class's images.
        self.image_paths = image_paths

    def __str__(self):
        return '%s, %d images' % (self.name, len(self.image_paths))

    def __len__(self):
        # Number of images in this class.
        return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
    """Build a list of ImageClass objects, one per subdirectory of `path`.

    Subdirectories are scanned in sorted order; each becomes one class whose
    images are the files inside it.

    NOTE: `has_class_directories` is accepted for interface compatibility
    but is not used by the implementation.
    """
    path_exp = os.path.expanduser(path)
    classes = sorted(entry for entry in os.listdir(path_exp)
                     if os.path.isdir(os.path.join(path_exp, entry)))
    dataset = []
    for class_name in classes:
        facedir = os.path.join(path_exp, class_name)
        dataset.append(ImageClass(class_name, get_image_paths(facedir)))
    return dataset
def get_image_paths(facedir):
    """Return the full paths of all entries in `facedir`.

    Returns an empty list when `facedir` is not a directory.
    """
    if not os.path.isdir(facedir):
        return []
    return [os.path.join(facedir, img) for img in os.listdir(facedir)]
def split_dataset(dataset, split_ratio, min_nrof_images_per_class, mode):
    """Split a dataset into train and test sets.

    Args:
        dataset: List of ImageClass objects.
        split_ratio: Fraction of the data assigned to the test set.
        min_nrof_images_per_class: Minimum number of training images required
            to keep a class (SPLIT_IMAGES mode only).
        mode: 'SPLIT_CLASSES' to assign whole classes to either set, or
            'SPLIT_IMAGES' to split the images within each class.

    Returns:
        A (train_set, test_set) tuple.

    Raises:
        ValueError: If `mode` is not one of the supported modes.
    """
    if mode == 'SPLIT_CLASSES':
        nrof_classes = len(dataset)
        class_indices = np.arange(nrof_classes)
        np.random.shuffle(class_indices)
        split = int(round(nrof_classes * (1 - split_ratio)))
        train_set = [dataset[i] for i in class_indices[0:split]]
        # Bug fix: the original used class_indices[split:-1], silently
        # dropping the last shuffled class from both sets.
        test_set = [dataset[i] for i in class_indices[split:]]
    elif mode == 'SPLIT_IMAGES':
        train_set = []
        test_set = []
        for cls in dataset:
            paths = cls.image_paths
            np.random.shuffle(paths)
            nrof_images_in_class = len(paths)
            split = int(math.floor(nrof_images_in_class * (1 - split_ratio)))
            if split == nrof_images_in_class:
                split = nrof_images_in_class - 1
            # Keep the class only if it has enough training images and at
            # least one test image.
            if split >= min_nrof_images_per_class and nrof_images_in_class - split >= 1:
                train_set.append(ImageClass(cls.name, paths[:split]))
                test_set.append(ImageClass(cls.name, paths[split:]))
    else:
        raise ValueError('Invalid train/test split mode "%s"' % mode)
    return train_set, test_set
def load_model(model, input_map=None):
    """Load a model into the default TensorFlow graph/session.

    `model` may be either a directory containing a metagraph (.meta) plus a
    checkpoint, or a single frozen-graph protobuf file.
    """
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    # or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, input_map=input_map, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)
        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
        # Restoring requires an active default session.
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
    """Return (meta_file, ckpt_file) names for the model in `model_dir`.

    Prefers the TensorFlow checkpoint state file; falls back to scanning for
    the highest-numbered 'model-*.ckpt-<step>' file.

    Raises:
        ValueError: If the directory does not contain exactly one .meta file.
    """
    files = os.listdir(model_dir)
    meta_files = [s for s in files if s.endswith('.meta')]
    if len(meta_files)==0:
        raise ValueError('No meta file found in the model directory (%s)' % model_dir)
    elif len(meta_files)>1:
        raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
    meta_file = meta_files[0]
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
        return meta_file, ckpt_file
    # NOTE(review): this reassignment of meta_files is never read again --
    # the loop below scans `files` directly; looks like leftover code.
    meta_files = [s for s in files if '.ckpt' in s]
    max_step = -1
    for f in files:
        step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
        if step_str is not None and len(step_str.groups())>=2:
            step = int(step_str.groups()[1])
            if step > max_step:
                max_step = step
                ckpt_file = step_str.groups()[0]
    return meta_file, ckpt_file
def distance(embeddings1, embeddings2, distance_metric=0):
    """Row-wise distance between two sets of embeddings.

    Args:
        embeddings1: Array of shape (n, d).
        embeddings2: Array of shape (n, d).
        distance_metric: 0 for squared Euclidean distance, 1 for angular
            distance based on cosine similarity (scaled to [0, 1]).

    Returns:
        A length-n array of distances.

    Raises:
        ValueError: If `distance_metric` is neither 0 nor 1.
    """
    if distance_metric == 0:
        # Squared Euclidian distance
        diff = np.subtract(embeddings1, embeddings2)
        dist = np.sum(np.square(diff), 1)
    elif distance_metric == 1:
        # Distance based on cosine similarity
        dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
        norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
        # Clamp to the valid arccos domain; rounding can push the ratio
        # marginally outside [-1, 1] for (nearly) parallel embeddings,
        # which previously produced NaN.
        similarity = np.clip(dot / norm, -1.0, 1.0)
        dist = np.arccos(similarity) / math.pi
    else:
        # Bug fix: the original raised a plain string, which is itself a
        # TypeError in Python 3 rather than a meaningful exception.
        raise ValueError('Undefined distance metric %d' % distance_metric)
    return dist
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
    """Cross-validated ROC evaluation of pairwise embedding distances.

    For each K-fold split, the threshold that maximizes accuracy on the
    training fold is selected, then tpr/fpr per threshold and accuracy are
    measured on the test fold.

    Returns:
        (tpr, fpr, accuracy): mean tpr and fpr per threshold across folds,
        and a per-fold accuracy array.
    """
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    tprs = np.zeros((nrof_folds,nrof_thresholds))
    fprs = np.zeros((nrof_folds,nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    indices = np.arange(nrof_pairs)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if subtract_mean:
            # Center the embeddings using statistics from the training fold only.
            mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
        else:
            mean = 0.0
        dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
    tpr = np.mean(tprs,0)
    fpr = np.mean(fprs,0)
    return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
    """Classify each pair as "same" when its distance falls below *threshold*
    and report (true positive rate, false positive rate, accuracy) against the
    ground-truth labels in *actual_issame*."""
    predicted_same = np.less(dist, threshold)
    predicted_diff = np.logical_not(predicted_same)
    actual_diff = np.logical_not(actual_issame)

    true_pos = np.sum(np.logical_and(predicted_same, actual_issame))
    false_pos = np.sum(np.logical_and(predicted_same, actual_diff))
    true_neg = np.sum(np.logical_and(predicted_diff, actual_diff))
    false_neg = np.sum(np.logical_and(predicted_diff, actual_issame))

    # Guard the rates against empty positive / negative label sets.
    pos_total = true_pos + false_neg
    neg_total = false_pos + true_neg
    tpr = float(true_pos) / float(pos_total) if pos_total != 0 else 0
    fpr = float(false_pos) / float(neg_total) if neg_total != 0 else 0
    acc = float(true_pos + true_neg) / dist.size
    return tpr, fpr, acc
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
    """Cross-validated VAL (validation rate) at a fixed FAR target.

    For each fold, the threshold reaching FAR = far_target is found on the
    training split (by linear interpolation over the swept thresholds) and the
    resulting VAL/FAR are measured on the held-out split.

    Args:
        thresholds: iterable of candidate distance thresholds to sweep.
        embeddings1, embeddings2: arrays of shape (n, d); row i of each forms one pair.
        actual_issame: boolean array; True when pair i is the same identity.
        far_target: desired false-accept rate (e.g. 1e-3).
        nrof_folds: number of cross-validation folds.
        distance_metric: forwarded to distance() (0 = squared Euclidean, 1 = angular).
        subtract_mean: if True, subtract the training-fold mean embedding first.

    Returns:
        (val_mean, val_std, far_mean) across folds.
    """
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    # shuffle=False keeps fold assignment deterministic across runs.
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    indices = np.arange(nrof_pairs)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if subtract_mean:
            # Mean is taken from the training fold only, avoiding test-set leakage.
            mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
        else:
            mean = 0.0
        dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            # NOTE(review): interp1d assumes far_train is usable as the x-axis;
            # with non-monotonic FAR values the interpolation is approximate.
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            # The target FAR is unreachable with the swept thresholds; fall
            # back to threshold 0 (accept nothing).
            threshold = 0.0
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
    """VAL/FAR at a fixed distance threshold.

    Args:
        threshold: pairs with dist < threshold are accepted as "same".
        dist: 1-D array of pair distances.
        actual_issame: boolean array of ground-truth labels.

    Returns:
        (val, far): fraction of genuine pairs accepted, and fraction of
        impostor pairs accepted. Either is 0.0 when its label set is empty.
    """
    predict_issame = np.less(dist, threshold)
    true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
    false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    n_same = np.sum(actual_issame)
    n_diff = np.sum(np.logical_not(actual_issame))
    # BUG FIX: guard the divisions against all-same / all-different label
    # sets (previously a ZeroDivisionError), mirroring calculate_accuracy.
    val = float(true_accept) / float(n_same) if n_same != 0 else 0.0
    far = float(false_accept) / float(n_diff) if n_diff != 0 else 0.0
    return val, far
def store_revision_info(src_path, output_dir, arg_string):
    """Write a revision_info.txt file into *output_dir* recording the command
    line arguments, TensorFlow version, current git commit hash and the
    uncommitted local diff of the repository at *src_path*.

    Args:
        src_path: path to the git working tree to inspect.
        output_dir: directory the revision_info.txt file is written to.
        arg_string: the program's argument string to record.
    """
    try:
        # Get git hash
        cmd = ['git', 'rev-parse', 'HEAD']
        gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
        (stdout, _) = gitproc.communicate()
        # BUG FIX: Popen returns bytes on Python 3; decode so '%s' below does
        # not render a b'...' literal into the file.
        git_hash = stdout.decode('utf-8', 'replace').strip()
    except OSError as e:
        # git is not installed / not runnable; record the failure instead.
        git_hash = ' '.join(cmd) + ': ' + e.strerror
    try:
        # Get local changes
        cmd = ['git', 'diff', 'HEAD']
        gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
        (stdout, _) = gitproc.communicate()
        git_diff = stdout.decode('utf-8', 'replace').strip()
    except OSError as e:
        git_diff = ' '.join(cmd) + ': ' + e.strerror
    # Store a text file in the log directory
    rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
    with open(rev_info_filename, "w") as text_file:
        text_file.write('arguments: %s\n--------------------\n' % arg_string)
        text_file.write('tensorflow version: %s\n--------------------\n' % tf.__version__)  # @UndefinedVariable
        text_file.write('git hash: %s\n--------------------\n' % git_hash)
        text_file.write('%s' % git_diff)
def list_variables(filename):
    """Return the alphabetically sorted names of all variables stored in the
    checkpoint file *filename*."""
    reader = training.NewCheckpointReader(filename)
    return sorted(reader.get_variable_to_shape_map().keys())
def put_images_on_grid(images, shape=(16,8)):
    """Tile a batch of square images onto one canvas.

    Args:
        images: array of shape (count, side, side, 3).
        shape: (columns, rows) of the grid.

    Returns:
        float32 canvas with a 3-pixel border around every cell; cells beyond
        the number of supplied images stay black.
    """
    n_cols, n_rows = shape
    total = images.shape[0]
    side = images.shape[1]
    border = 3
    cell = side + border
    canvas = np.zeros((n_rows * cell + border, n_cols * cell + border, 3), np.float32)
    # Fill cells row-major until either the grid or the batch is exhausted.
    for idx in range(min(total, n_rows * n_cols)):
        row, col = divmod(idx, n_cols)
        top = row * cell + border
        left = col * cell + border
        canvas[top:top + side, left:left + side, :] = images[idx]
    return canvas
def write_arguments_to_file(args, filename):
    """Dump every attribute of the parsed-arguments namespace *args* to
    *filename*, one 'name: value' line per argument."""
    lines = ['%s: %s\n' % (name, str(value)) for name, value in vars(args).items()]
    with open(filename, 'w') as out:
        out.writelines(lines)
| mit | 5db670bcec6193b01f3858214dfb49bf | 39.921191 | 146 | 0.628135 | 3.378054 | false | false | false | false |
jgorset/facepy | facepy/utils.py | 1 | 2686 | from datetime import datetime, timedelta
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
from facepy.graph_api import GraphAPI
def get_extended_access_token(access_token, application_id, application_secret_key, api_version=None):
    """
    Get an extended OAuth access token.

    :param access_token: A string describing an OAuth access token.
    :param application_id: An integer describing the Facebook application's ID.
    :param application_secret_key: A string describing the Facebook application's secret key.
    :param api_version: An optional string describing the Graph API version to use.

    Returns a tuple with a string describing the extended access token and a datetime instance
    describing when it expires (``None`` when no expiration is reported).
    """
    graph = GraphAPI(version=api_version)
    response = graph.get(
        path='oauth/access_token',
        client_id=application_id,
        client_secret=application_secret_key,
        grant_type='fb_exchange_token',
        fb_exchange_token=access_token
    )
    try:
        # api_version < 2.3 returns a url-query-formatted string; try to parse it.
        components = parse_qs(response)
    except AttributeError:
        # api_version >= 2.3 returns a dict, which makes parse_qs raise
        # AttributeError; read the token and expiry straight from it.
        token = response['access_token']
        expiry_countdown = response.get('expires_in', 3600) # https://github.com/jgorset/facepy/pull/172
    else:
        token = components['access_token'][0]
        try:
            expiry_countdown = int(components['expires'][0])
        except KeyError: # there is no expiration
            expiry_countdown = None
    if expiry_countdown is not None:
        expires_at = datetime.now() + timedelta(seconds=expiry_countdown)
    else:
        # No expiry reported by Facebook: the token is treated as non-expiring.
        expires_at = None
    return token, expires_at
def get_application_access_token(application_id, application_secret_key, api_version=None):
    """
    Get an OAuth access token for the given application.

    :param application_id: An integer describing a Facebook application's ID.
    :param application_secret_key: A string describing a Facebook application's secret key.
    :param api_version: An optional string describing the Graph API version to use.

    NOTE(review): the two code paths return different shapes -- a bare token
    string for api_version < 2.3 but a ``(token, None)`` tuple for
    api_version >= 2.3. Callers must handle both; confirm before unifying.
    """
    graph = GraphAPI(version=api_version)
    response = graph.get(
        path='oauth/access_token',
        client_id=application_id,
        client_secret=application_secret_key,
        grant_type='client_credentials'
    )
    try:
        # api_version < 2.3 returns a url-query-formatted string.
        data = parse_qs(response)
        try:
            return data['access_token'][0]
        except KeyError:
            raise GraphAPI.FacebookError('No access token given')
    except AttributeError: # api_version >= 2.3 returns a dict
        return response['access_token'], None
| mit | b4b8e4ed38f20509f2c8bdba5b8711d6 | 33 | 104 | 0.672003 | 4.22327 | false | false | false | false |
davidsandberg/facenet | tmp/rename_casia_directories.py | 4 | 1350 | import shutil
import argparse
import os
import sys
def main(args):
    """Rename CASIA dataset directories according to a mapping file.

    Each line of the map file is '<dir_name> <class_name>'; every directory in
    args.dataset_path whose name appears as a dir_name is renamed to its class
    name. Lines whose class name was already seen are reported and skipped.
    """
    identity_map = {}
    # PERF: track seen class names in a set; the previous
    # `class_name not in identity_map.values()` scanned all values per line (O(n^2)).
    seen_classes = set()
    with open(os.path.expanduser(args.map_file_name), "r") as f:
        for line in f:
            fields = line.split(' ')
            dir_name = fields[0]
            class_name = fields[1].replace('\n', '').replace('\r', '')
            if class_name not in seen_classes:
                identity_map[dir_name] = class_name
                seen_classes.add(class_name)
            else:
                print('Duplicate class names: %s' % class_name)
    dataset_path_exp = os.path.expanduser(args.dataset_path)
    dirs = os.listdir(dataset_path_exp)
    for dir_entry in dirs:
        old_path = os.path.join(dataset_path_exp, dir_entry)
        if dir_entry in identity_map:
            new_path = os.path.join(dataset_path_exp, identity_map[dir_entry])
            # Only rename actual directories; plain files are left untouched.
            if os.path.isdir(old_path):
                print('Renaming %s to %s' % (old_path, new_path))
                shutil.move(old_path, new_path)
def parse_arguments(argv):
    """Build the command-line parser and parse *argv* (list of strings)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'map_file_name', type=str,
        help='Name of the text file that contains the directory to class name mappings.')
    arg_parser.add_argument(
        'dataset_path', type=str,
        help='Path to the dataset directory.')
    return arg_parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| mit | 7f74edd4600a967966f8ce633d16899d | 35.486486 | 132 | 0.591111 | 3.6 | false | false | false | false |
davidsandberg/facenet | test/restore_test.py | 5 | 7104 | # MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import tempfile
import os
import shutil
import tensorflow as tf
import numpy as np
class TrainTest(unittest.TestCase):
    """Checks that TensorFlow variables saved to a checkpoint can be restored,
    both plain and via exponential-moving-average (EMA) shadow variables."""
    @classmethod
    def setUpClass(self):
        # Scratch directory that receives the checkpoints written by the tests.
        self.tmp_dir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(self):
        # Recursively remove the temporary directory
        shutil.rmtree(self.tmp_dir)
    def test_restore_noema(self):
        """Save plain trainable variables and verify they restore unchanged."""
        # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
        x_data = np.random.rand(100).astype(np.float32)
        y_data = x_data * 0.1 + 0.3
        # Try to find values for W and b that compute y_data = W * x_data + b
        # (We know that W should be 0.1 and b 0.3, but TensorFlow will
        # figure that out for us.)
        W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
        b = tf.Variable(tf.zeros([1]), name='b')
        y = W * x_data + b
        # Minimize the mean squared errors.
        loss = tf.reduce_mean(tf.square(y - y_data))
        optimizer = tf.train.GradientDescentOptimizer(0.5)
        train = optimizer.minimize(loss)
        # Before starting, initialize the variables.  We will 'run' this first.
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(tf.trainable_variables())
        # Launch the graph.
        sess = tf.Session()
        sess.run(init)
        # Fit the line.
        for _ in range(201):
            sess.run(train)
        w_reference = sess.run('W:0')
        b_reference = sess.run('b:0')
        saver.save(sess, os.path.join(self.tmp_dir, "model_ex1"))
        # Rebuild the graph from the saved meta graph and restore the weights.
        tf.reset_default_graph()
        saver = tf.train.import_meta_graph(os.path.join(self.tmp_dir, "model_ex1.meta"))
        sess = tf.Session()
        saver.restore(sess, os.path.join(self.tmp_dir, "model_ex1"))
        w_restored = sess.run('W:0')
        b_restored = sess.run('b:0')
        # BUG FIX: the failure message was passed positionally, binding to
        # assertAlmostEqual's `places` parameter and raising TypeError on
        # failure; pass it through the `msg` keyword instead.
        self.assertAlmostEqual(w_reference, w_restored, msg='Restored model use different weight than the original model')
        self.assertAlmostEqual(b_reference, b_restored, msg='Restored model use different weight than the original model')
    @unittest.skip("Skip restore EMA test case for now")
    def test_restore_ema(self):
        """Save EMA shadow variables and restore them into the plain variables."""
        # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
        x_data = np.random.rand(100).astype(np.float32)
        y_data = x_data * 0.1 + 0.3
        # Try to find values for W and b that compute y_data = W * x_data + b
        # (We know that W should be 0.1 and b 0.3, but TensorFlow will
        # figure that out for us.)
        W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
        b = tf.Variable(tf.zeros([1]), name='b')
        y = W * x_data + b
        # Minimize the mean squared errors.
        loss = tf.reduce_mean(tf.square(y - y_data))
        optimizer = tf.train.GradientDescentOptimizer(0.5)
        opt_op = optimizer.minimize(loss)
        # Track the moving averages of all trainable variables.
        ema = tf.train.ExponentialMovingAverage(decay=0.9999)
        averages_op = ema.apply(tf.trainable_variables())
        # Update the EMA shadows after every optimizer step.
        with tf.control_dependencies([opt_op]):
            train_op = tf.group(averages_op)
        # Before starting, initialize the variables.  We will 'run' this first.
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(tf.trainable_variables())
        # Launch the graph.
        sess = tf.Session()
        sess.run(init)
        # Fit the line.
        for _ in range(201):
            sess.run(train_op)
        w_reference = sess.run('W/ExponentialMovingAverage:0')
        b_reference = sess.run('b/ExponentialMovingAverage:0')
        saver.save(sess, os.path.join(self.tmp_dir, "model_ex1"))
        tf.reset_default_graph()
        tf.train.import_meta_graph(os.path.join(self.tmp_dir, "model_ex1.meta"))
        sess = tf.Session()
        # Dump the variable collections for debugging fold/restore issues.
        print('------------------------------------------------------')
        for var in tf.global_variables():
            print('all variables: ' + var.op.name)
        for var in tf.trainable_variables():
            print('normal variable: ' + var.op.name)
        for var in tf.moving_average_variables():
            print('ema variable: ' + var.op.name)
        print('------------------------------------------------------')
        # Two equivalent ways of building the EMA-name -> variable map; mode 1
        # constructs the shadow names by string suffix.
        mode = 1
        restore_vars = {}
        if mode == 0:
            ema = tf.train.ExponentialMovingAverage(1.0)
            for var in tf.trainable_variables():
                print('%s: %s' % (ema.average_name(var), var.op.name))
                restore_vars[ema.average_name(var)] = var
        elif mode == 1:
            for var in tf.trainable_variables():
                ema_name = var.op.name + '/ExponentialMovingAverage'
                print('%s: %s' % (ema_name, var.op.name))
                restore_vars[ema_name] = var
        saver = tf.train.Saver(restore_vars, name='ema_restore')
        saver.restore(sess, os.path.join(self.tmp_dir, "model_ex1"))
        w_restored = sess.run('W:0')
        b_restored = sess.run('b:0')
        # BUG FIX: same positional-message problem as above; use `msg=`.
        self.assertAlmostEqual(w_reference, w_restored, msg='Restored model modes not use the EMA filtered weight')
        self.assertAlmostEqual(b_reference, b_restored, msg='Restored model modes not use the EMA filtered bias')
def create_checkpoint_file(model_dir, model_file):
    """Write a TensorFlow-style 'checkpoint' index file into *model_dir*
    pointing at the given model file."""
    checkpoint_path = os.path.join(model_dir, 'checkpoint')
    model_path = os.path.join(model_dir, model_file)
    contents = ('model_checkpoint_path: "%s"\n' % model_path
                + 'all_model_checkpoint_paths: "%s"\n' % model_path)
    with open(checkpoint_path, 'w') as f:
        f.write(contents)
if __name__ == "__main__":
unittest.main()
| mit | ed029d98cc71b0fec31beb77466fc047 | 38.254144 | 118 | 0.598395 | 3.7888 | false | false | false | false |
davidsandberg/facenet | contributed/export_embeddings.py | 1 | 8585 | """
Exports the embeddings and labels of a directory of images as numpy arrays.
Typicall usage expect the image directory to be of the openface/facenet form and
the images to be aligned. Simply point to your model and your image directory:
python facenet/contributed/export_embeddings.py ~/models/facenet/20170216-091149/ ~/datasets/lfw/mylfw
Output:
embeddings.npy -- Embeddings as np array, Use --embeddings_name to change name
labels.npy -- Integer labels as np array, Use --labels_name to change name
label_strings.npy -- Strings from folders names, --labels_strings_name to change name
Use --image_batch to dictacte how many images to load in memory at a time.
If your images aren't already pre-aligned, use --is_aligned False
I started with compare.py from David Sandberg, and modified it to export
the embeddings. The image loading is done use the facenet library if the image
is pre-aligned. If the image isn't pre-aligned, I use the compare.py function.
I've found working with the embeddings useful for classifications models.
Charles Jekel 2017
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import facenet
import align.detect_face
import glob
from six.moves import xrange
def main(args):
    """Compute facenet embeddings for every image under args.data_dir and save
    them (plus integer labels and label strings) as .npy files.

    Images are processed in batches of args.image_batch; unaligned images are
    detected/cropped on the fly via load_and_align_data.
    """
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)
    classes = [path for path in os.listdir(path_exp) \
        if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # get the label strings (classes is already directory names, so this
    # filter keeps the same sorted list)
    label_strings = [name for name in classes if \
       os.path.isdir(os.path.join(path_exp, name))]
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model_dir)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            # Ceiling division: a trailing partial batch gets its own pass.
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()
            for i in range(nrof_batches):
                # n is the exclusive end index of this batch's slice.
                if i == nrof_batches -1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size
                # Get images for the batch
                if args.is_aligned is True:
                    images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                else:
                    images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction)
                # phase_train=False: run the network in inference mode.
                feed_dict = { images_placeholder: images, phase_train_placeholder:False }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)
            run_time = time.time() - start_time
            print('Run time: ', run_time)
            # export embeddings and labels
            label_list = np.array(label_list)
            np.save(args.embeddings_name, emb_array)
            np.save(args.labels_name, label_list)
            label_strings = np.array(label_strings)
            # Save one class-name string per image, indexed by its label.
            np.save(args.labels_strings_name, label_strings[label_list])
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    """Detect, crop, resize and prewhiten one face per image.

    Runs an MTCNN face detector over each path in *image_paths*, crops the
    first detected face with *margin* extra pixels, resizes to
    (image_size, image_size) and applies facenet prewhitening.

    Returns a stacked array of shape (len(image_paths), image_size, image_size, 3).
    NOTE(review): only the first bounding box is used; an image with no
    detected face makes the bounding_boxes[0] indexing fail.
    """
    minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps's threshold
    factor = 0.709 # scale factor
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in xrange(nrof_samples):
        print(image_paths[i])
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        # Use the first detection only; expand the box by margin/2 per side,
        # clamped to the image bounds.
        det = np.squeeze(bounding_boxes[0,0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0]-margin/2, 0)
        bb[1] = np.maximum(det[1]-margin/2, 0)
        bb[2] = np.minimum(det[2]+margin/2, img_size[1])
        bb[3] = np.minimum(det[3]+margin/2, img_size[0])
        cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images
def parse_arguments(argv):
    """Build the command-line parser for export_embeddings and parse *argv*.

    BUG FIX: --is_aligned previously used type=str, so any supplied value --
    including the literal 'True' -- arrived as a truthy string that failed the
    `args.is_aligned is True` check in main(). A boolean converter now yields
    real True/False while keeping the default of True.
    """
    def _str2bool(value):
        # Accept the usual truthy spellings; anything else means False.
        if isinstance(value, bool):
            return value
        return value.lower() in ('true', '1', 'yes', 'y')
    parser = argparse.ArgumentParser()
    parser.add_argument('model_dir', type=str,
        help='Directory containing the meta_file and ckpt_file')
    parser.add_argument('data_dir', type=str,
        help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')
    parser.add_argument('--is_aligned', type=_str2bool,
        help='Is the data directory already aligned and cropped?', default=True)
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=160)
    parser.add_argument('--margin', type=int,
        help='Margin for the crop around the bounding box (height, width) in pixels.',
        default=44)
    parser.add_argument('--gpu_memory_fraction', type=float,
        help='Upper bound on the amount of GPU memory that will be used by the process.',
        default=1.0)
    parser.add_argument('--image_batch', type=int,
        help='Number of images stored in memory at a time. Default 500.',
        default=500)
    # numpy file Names
    parser.add_argument('--embeddings_name', type=str,
        help='Enter string of which the embeddings numpy array is saved as.',
        default='embeddings.npy')
    parser.add_argument('--labels_name', type=str,
        help='Enter string of which the labels numpy array is saved as.',
        default='labels.npy')
    parser.add_argument('--labels_strings_name', type=str,
        help='Enter string of which the labels as strings numpy array is saved as.',
        default='label_strings.npy')
    return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| mit | 0a665f6cbe4a0ff30fe2896682244720 | 42.57868 | 132 | 0.659988 | 3.687715 | false | false | false | false |
davidsandberg/facenet | tmp/vggverydeep19.py | 4 | 4024 | """Load the VGG imagenet model into TensorFlow.
Download the model from http://www.robots.ox.ac.uk/~vgg/research/very_deep/
and point to the file 'imagenet-vgg-verydeep-19.mat'
"""
import numpy as np
from scipy import io
import tensorflow as tf
def load(filename, images):
    """Build the VGG-19 graph from a matconvnet weight file.

    Args:
        filename: path to 'imagenet-vgg-verydeep-19.mat'.
        images: input image tensor the network is applied to.

    Returns:
        Dict mapping layer names ('input', 'convX_Y', 'avgpoolX') to tensors,
        identical in keys and wiring to the previous hand-unrolled version.
    """
    vgg19 = io.loadmat(filename)
    vgg19Layers = vgg19['layers']

    # A function to get the weights of the VGG layers
    def vbbWeights(layerNumber):
        W = vgg19Layers[0][layerNumber][0][0][2][0][0]
        return tf.constant(W)

    def vbbConstants(layerNumber):
        b = vgg19Layers[0][layerNumber][0][0][2][0][1].T
        return tf.constant(np.reshape(b, (b.size)))

    def conv_relu(inputTensor, layerNumber):
        # Convolution with stride 1 and SAME padding followed by ReLU,
        # exactly as every conv layer in the original unrolled code.
        conv = tf.nn.conv2d(inputTensor, filter = vbbWeights(layerNumber),
                            strides = [1, 1, 1, 1], padding = 'SAME')
        return tf.nn.relu(conv + vbbConstants(layerNumber))

    def avg_pool(inputTensor):
        # 2x2 average pooling with stride 2 (kept as avg, matching the
        # previous implementation, even though VGG originally used max).
        return tf.nn.avg_pool(inputTensor, ksize = [1, 2, 2, 1],
                              strides = [1, 2, 2, 1], padding = 'SAME')

    # (block number, matconvnet layer indices of the convs in that block);
    # replaces 36 copy-pasted lines with one data-driven loop.
    conv_blocks = [
        (1, [0, 2]),
        (2, [5, 7]),
        (3, [10, 12, 14, 16]),
        (4, [19, 21, 23, 25]),
        (5, [28, 30, 32, 34]),
    ]
    modelGraph = {}
    modelGraph['input'] = images
    previous = 'input'
    for block, layer_indices in conv_blocks:
        for conv, layerNumber in enumerate(layer_indices, start=1):
            name = 'conv%d_%d' % (block, conv)
            modelGraph[name] = conv_relu(modelGraph[previous], layerNumber)
            previous = name
        name = 'avgpool%d' % block
        modelGraph[name] = avg_pool(modelGraph[previous])
        previous = name
    return modelGraph
| mit | 8372c9a675bf007cfec4c9396569acde | 81.122449 | 162 | 0.63171 | 2.686248 | false | false | false | false |
stanfordnmbl/osim-rl | tests/test.segfault.py | 1 | 1435 | import os
import opensim
opensim.Body('block', 0.0001 , opensim.Vec3(0), opensim.Inertia(1,1,.0001,0,0,0) );
opensim.Body('block', 0.0001 , opensim.Vec3(0), opensim.Inertia(1,1,.0001,0,0,0) );
model_path = os.path.join(os.path.dirname(__file__), '../osim/models/gait9dof18musc.osim')
def test(model_path, visualize):
    """Reproduce the segfault scenario: load an OpenSim model, attach a
    prescribed controller to every muscle, then add a massless block body
    joined to the ground and re-initialize the system.

    NOTE(review): the `visualize` parameter is accepted but never used here.
    """
    model = opensim.Model(model_path)
    brain = opensim.PrescribedController()
    model.addController(brain)
    state = model.initSystem()
    muscleSet = model.getMuscles()
    # Drive every muscle with a constant excitation of 1.0.
    for j in range(muscleSet.getSize()):
        brain.addActuator(muscleSet.get(j))
        func = opensim.Constant(1.0)
        brain.prescribeControlForActuator(j, func)
    block = opensim.Body('block', 0.0001 , opensim.Vec3(0), opensim.Inertia(1,1,.0001,0,0,0) );
    model.addComponent(block)
    # Planar joint pinning the block to the ground frame at the origin.
    pj = opensim.PlanarJoint('pin',
        model.getGround(), # PhysicalFrame
        opensim.Vec3(0, 0, 0),
        opensim.Vec3(0, 0, 0),
        block, # PhysicalFrame
        opensim.Vec3(0, 0, 0),
        opensim.Vec3(0, 0, 0))
    model.addComponent(pj)
    # Re-initialize after mutating the model, then touch a joint coordinate.
    model.initSystem()
    pj.getCoordinate(1)
test(model_path,False)
test(model_path,False)
from osim.env import L2RunEnv
env = L2RunEnv(visualize=False)
env1 = L2RunEnv(visualize=False)
env1.reset()
env1.reward()
env.reset()
env.reward()
| mit | 596808f6b31ca2e448a35ade9528705d | 30.195652 | 95 | 0.603484 | 2.928571 | false | true | false | false |
stanfordnmbl/osim-rl | osim/env/arm.py | 1 | 4609 | import math
import numpy as np
import os
from .utils.mygym import convert_to_gym
import gym
import opensim
import random
from .osim import OsimEnv
class Arm2DEnv(OsimEnv):
    """OpenSim reaching environment: a 2-DOF, 6-muscle arm whose wrist marker
    must reach a randomly placed target; reward is 1 minus the squared
    distance to the target."""
    model_path = os.path.join(os.path.dirname(__file__), '../models/arm2dof6musc.osim')
    # Maximum number of steps per episode.
    time_limit = 200
    # Current target position (set by generate_new_target).
    target_x = 0
    target_y = 0
    def get_observation(self):
        """Return the observation vector: target position, joint
        pos/vel/acc for shoulder and elbow, per-muscle activations, and the
        wrist marker's (x, y) position."""
        state_desc = self.get_state_desc()
        res = [self.target_x, self.target_y]
        # for body_part in ["r_humerus", "r_ulna_radius_hand"]:
        #     res += state_desc["body_pos"][body_part][0:2]
        #     res += state_desc["body_vel"][body_part][0:2]
        #     res += state_desc["body_acc"][body_part][0:2]
        #     res += state_desc["body_pos_rot"][body_part][2:]
        #     res += state_desc["body_vel_rot"][body_part][2:]
        #     res += state_desc["body_acc_rot"][body_part][2:]
        for joint in ["r_shoulder","r_elbow",]:
            res += state_desc["joint_pos"][joint]
            res += state_desc["joint_vel"][joint]
            res += state_desc["joint_acc"][joint]
        # Sorted muscle names keep the observation layout deterministic.
        for muscle in sorted(state_desc["muscles"].keys()):
            res += [state_desc["muscles"][muscle]["activation"]]
            # res += [state_desc["muscles"][muscle]["fiber_length"]]
            # res += [state_desc["muscles"][muscle]["fiber_velocity"]]
        res += state_desc["markers"]["r_radius_styloid"]["pos"][:2]
        return res
    def get_observation_space_size(self):
        # Matches the length of the vector built in get_observation.
        return 16 #46
    def generate_new_target(self):
        """Sample a new target in polar coordinates and move the visual
        target body there by setting the planar joint's coordinates."""
        theta = random.uniform(math.pi*0, math.pi*2/3)
        radius = random.uniform(0.3, 0.65)
        self.target_x = math.cos(theta) * radius
        self.target_y = -math.sin(theta) * radius + 0.8
        print('\ntarget: [{} {}]'.format(self.target_x, self.target_y))
        state = self.osim_model.get_state()
        # Coordinates 1 and 2 of the planar joint are the translations;
        # the y coordinate is unlocked, set, then locked again.
        # self.target_joint.getCoordinate(0).setValue(state, self.target_x, False)
        self.target_joint.getCoordinate(1).setValue(state, self.target_x, False)
        self.target_joint.getCoordinate(2).setLocked(state, False)
        self.target_joint.getCoordinate(2).setValue(state, self.target_y, False)
        self.target_joint.getCoordinate(2).setLocked(state, True)
        self.osim_model.set_state(state)
    def reset(self, random_target=True, obs_as_dict=True):
        """Reset the simulation; optionally sample a fresh target."""
        obs = super(Arm2DEnv, self).reset(obs_as_dict=obs_as_dict)
        if random_target:
            self.generate_new_target()
        self.osim_model.reset_manager()
        return obs
    def __init__(self, *args, **kwargs):
        """Build the base environment, then attach a small green sphere body
        ('target') to the ground with a planar joint so the target can be
        moved and visualized."""
        super(Arm2DEnv, self).__init__(*args, **kwargs)
        blockos = opensim.Body('target', 0.0001 , opensim.Vec3(0), opensim.Inertia(1,1,.0001,0,0,0) );
        self.target_joint = opensim.PlanarJoint('target-joint',
                                  self.osim_model.model.getGround(), # PhysicalFrame
                                  opensim.Vec3(0, 0, 0),
                                  opensim.Vec3(0, 0, 0),
                                  blockos, # PhysicalFrame
                                  opensim.Vec3(0, 0, -0.25),
                                  opensim.Vec3(0, 0, 0))
        self.noutput = self.osim_model.noutput
        geometry = opensim.Ellipsoid(0.02, 0.02, 0.02);
        geometry.setColor(opensim.Green);
        blockos.attachGeometry(geometry)
        self.osim_model.model.addJoint(self.target_joint)
        self.osim_model.model.addBody(blockos)
        # Re-initialize the system after adding the new body and joint.
        self.osim_model.model.initSystem()
    def reward(self):
        """1 minus the squared wrist-to-target distance; a NaN distance is
        treated as the maximum penalty of 1."""
        state_desc = self.get_state_desc()
        penalty = (state_desc["markers"]["r_radius_styloid"]["pos"][0] - self.target_x)**2 + (state_desc["markers"]["r_radius_styloid"]["pos"][1] - self.target_y)**2
        # print(state_desc["markers"]["r_radius_styloid"]["pos"])
        # print((self.target_x, self.target_y))
        if np.isnan(penalty):
            penalty = 1
        return 1.-penalty
    def get_reward(self):
        return self.reward()
class Arm2DVecEnv(Arm2DEnv):
    """Variant of Arm2DEnv returning plain observation vectors (not dicts)
    and sanitizing NaNs in both actions and observations."""
    def reset(self, obs_as_dict=False):
        """Reset the env; replace any NaNs in the observation with zeros."""
        obs = super(Arm2DVecEnv, self).reset(obs_as_dict=obs_as_dict)
        if np.isnan(obs).any():
            obs = np.nan_to_num(obs)
        return obs
    def step(self, action, obs_as_dict=False):
        """Step the env with a NaN-sanitized action. If the resulting
        observation contains NaNs (unstable simulation), zero them, end the
        episode and apply a penalty of 10 to the reward."""
        if np.isnan(action).any():
            action = np.nan_to_num(action)
        obs, reward, done, info = super(Arm2DVecEnv, self).step(action, obs_as_dict=obs_as_dict)
        if np.isnan(obs).any():
            obs = np.nan_to_num(obs)
            done = True
            # BUG FIX: the original `reward -10` was a no-op expression
            # statement; apply the intended penalty.
            reward -= 10
        return obs, reward, done, info
uccser/cs-unplugged | csunplugged/at_home/management/commands/loadactivities.py | 1 | 2260 | """Module for the custom Django loadactivities command."""
import os.path
from django.core.management.base import BaseCommand
from django.conf import settings
from utils.BaseLoader import BaseLoader
from utils.LoaderFactory import LoaderFactory
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
class Command(BaseCommand):
    """Required command class for the custom Django loadactivities command."""
    help = "Stores content in database."
    def add_arguments(self, parser):
        """Add the optional --lite-load flag to the loadactivities command."""
        parser.add_argument(
            "--lite-load",
            action="store_true",
            dest="lite_load",
            help="Perform lite load (only load key content)",
        )
    def handle(self, *args, **options):
        """Automatically called when the loadactivities command is given.

        Reads activities.yaml from the at-home content directory and runs one
        activity loader per listed activity.

        Raises:
            MissingRequiredFieldError: when the structure file has no
                'activities' mapping.
        """
        lite_load = options.get("lite_load")
        factory = LoaderFactory()
        # Get structure and content files
        base_loader = BaseLoader()
        base_path = settings.ACTIVITIES_CONTENT_BASE_PATH
        structure_file_path = os.path.join(
            base_path,
            base_loader.structure_dir,
            "activities.yaml"
        )
        structure_file = base_loader.load_yaml_file(structure_file_path)
        # The structure file must define a dict under the 'activities' key.
        if structure_file.get("activities", None) is None or not isinstance(structure_file["activities"], dict):
            raise MissingRequiredFieldError(
                structure_file_path,
                ["activities"],
                "At Home"
            )
        else:
            # Each activity slug maps to '<slug>.yaml' under the same base path.
            for activity_slug, activity_data in structure_file["activities"].items():
                activity_path = activity_slug
                activity_structure_file = "{}.yaml".format(activity_slug)
                factory.create_activity_loader(
                    base_path=base_path,
                    content_path=activity_path,
                    structure_filename=activity_structure_file,
                    lite_loader=lite_load,
                    activity_data=activity_data,
                ).load()
uccser/cs-unplugged | csunplugged/topics/migrations/0093_auto_20190208_0157.py | 1 | 5667 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-02-08 01:57
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11.15 makemigrations):
    # adds "_mi" language-suffix translation columns (presumably Māori,
    # ISO 639-1 code "mi" — confirm) for every translatable topics model.
    # Edit with care; generated migrations should normally not be modified.

    dependencies = [
        ('topics', '0092_auto_20181105_0901'),
    ]

    operations = [
        migrations.AddField(
            model_name='agegroup',
            name='description_mi',
            field=models.CharField(default='', max_length=500, null=True),
        ),
        migrations.AddField(
            model_name='classroomresource',
            name='description_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='curriculumarea',
            name='name_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='curriculumintegration',
            name='content_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='curriculumintegration',
            name='name_mi',
            field=models.CharField(default='', max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='glossaryterm',
            name='definition_mi',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='glossaryterm',
            name='term_mi',
            field=models.CharField(max_length=200, null=True, unique=True),
        ),
        migrations.AddField(
            model_name='learningoutcome',
            name='text_mi',
            field=models.CharField(default='', max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='lesson',
            name='computational_thinking_links_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='lesson',
            name='content_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='lesson',
            name='heading_tree_mi',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=list, null=True),
        ),
        migrations.AddField(
            model_name='lesson',
            name='name_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='lesson',
            name='programming_challenges_description_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='content_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='extra_challenge_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='name_mi',
            field=models.CharField(default='', max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengedifficulty',
            name='name_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengeimplementation',
            name='expected_result_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengeimplementation',
            name='hints_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengeimplementation',
            name='solution_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengelanguage',
            name='name_mi',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='resourcedescription',
            name='description_mi',
            field=models.CharField(default='', max_length=300, null=True),
        ),
        migrations.AddField(
            model_name='topic',
            name='content_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='topic',
            name='name_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='topic',
            name='other_resources_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='unitplan',
            name='computational_thinking_links_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='unitplan',
            name='content_mi',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='unitplan',
            name='heading_tree_mi',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True),
        ),
        migrations.AddField(
            model_name='unitplan',
            name='name_mi',
            field=models.CharField(default='', max_length=100, null=True),
        ),
    ]
| mit | be89a046d9484d0f9997892e54f34e52 | 34.198758 | 90 | 0.550909 | 4.479842 | false | false | false | false |
uccser/cs-unplugged | csunplugged/topics/migrations/0063_auto_20170610_2139.py | 1 | 5029 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 21:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11.2 makemigrations):
    # renames the ProgrammingExercise* models to ProgrammingChallenge*,
    # replaces the old ProgrammingExercise model with a new
    # ProgrammingChallenge model, and rewires the lesson/unit-plan
    # relations accordingly. Edit with care; generated code.

    dependencies = [
        ('topics', '0062_auto_20170609_0424'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProgrammingChallenge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('name', models.CharField(max_length=200)),
                ('challenge_set_number', models.PositiveSmallIntegerField()),
                ('challenge_number', models.PositiveSmallIntegerField()),
                ('content', models.TextField()),
                ('extra_challenge', models.TextField(null=True)),
            ],
        ),
        migrations.RenameModel(
            old_name='ProgrammingExerciseDifficulty',
            new_name='ProgrammingChallengeDifficulty',
        ),
        migrations.RenameModel(
            old_name='ProgrammingExerciseLanguageImplementation',
            new_name='ProgrammingChallengeImplementation',
        ),
        migrations.RenameModel(
            old_name='ProgrammingExerciseLanguage',
            new_name='ProgrammingChallengeLanguage',
        ),
        migrations.RenameModel(
            old_name='ConnectedGeneratedResource',
            new_name='ResourceDescription',
        ),
        migrations.RemoveField(
            model_name='programmingexercise',
            name='difficulty',
        ),
        migrations.RemoveField(
            model_name='programmingexercise',
            name='learning_outcomes',
        ),
        migrations.RemoveField(
            model_name='programmingexercise',
            name='topic',
        ),
        migrations.RenameField(
            model_name='agerange',
            old_name='age_range',
            new_name='ages',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='programming_exercises',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='exercise',
        ),
        migrations.AlterField(
            model_name='lesson',
            name='age_range',
            field=models.ManyToManyField(related_name='lessons', to='topics.AgeRange'),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='generated_resources',
            field=models.ManyToManyField(related_name='lessons', through='topics.ResourceDescription', to='resources.Resource'),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='learning_outcomes',
            field=models.ManyToManyField(related_name='lessons', to='topics.LearningOutcome'),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lessons', to='topics.Topic'),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='unit_plan',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lessons', to='topics.UnitPlan'),
        ),
        migrations.AlterField(
            model_name='unitplan',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='unit_plans', to='topics.Topic'),
        ),
        migrations.DeleteModel(
            name='ProgrammingExercise',
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='difficulty',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='programming_challenges', to='topics.ProgrammingChallengeDifficulty'),
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='learning_outcomes',
            field=models.ManyToManyField(related_name='programming_challenges', to='topics.LearningOutcome'),
        ),
        migrations.AddField(
            model_name='programmingchallenge',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='programming_challenges', to='topics.Topic'),
        ),
        migrations.AddField(
            model_name='lesson',
            name='programming_challenges',
            field=models.ManyToManyField(related_name='lessons', to='topics.ProgrammingChallenge'),
        ),
        migrations.AddField(
            model_name='programmingchallengeimplementation',
            name='challenge',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='implementations', to='topics.ProgrammingChallenge'),
            preserve_default=False,
        ),
    ]
| mit | 2a62a2194432e97c02279abd91e2568c | 38.289063 | 164 | 0.593557 | 4.669452 | false | false | false | false |
uccser/cs-unplugged | csunplugged/plugging_it_in/urls.py | 1 | 1093 | """URL routing for the plugging_it_in application."""
from django.urls import path
from django.conf.urls import url
from . import views
app_name = "plugging_it_in"

urlpatterns = [
    # Application homepage.
    url(
        r"^$",
        views.IndexView.as_view(),
        name="index"
    ),
    # Static "about" page.
    path(
        'about/',
        views.AboutView.as_view(),
        name="about"
    ),
    # Comparison page between block-based programming and Scratch.
    path(
        'block-based-vs-scratch/',
        views.BlockBasedAndScratchView.as_view(),
        name="block_based_vs_scratch"
    ),
    # List of programming challenges for a topic's lesson.
    url(
        r"^(?P<topic_slug>[-\w]+)/(?P<lesson_slug>[-\w]+)/$",
        views.ProgrammingChallengeListView.as_view(),
        name="lesson"
    ),
    # A single programming challenge in a given programming language.
    url(
        r"^(?P<topic_slug>[-\w]+)/(?P<lesson_slug>[-\w]+)/(?P<challenge_slug>[-\w]+)/(?P<language_slug>[-\w]+)/$",
        views.ProgrammingChallengeView.as_view(),
        name="programming_challenge"
    ),
    # Proxy endpoint for code submissions (presumably forwards to a Jobe
    # code-execution server — confirm in views.JobeProxyView).
    url(
        r"^jobe_proxy$",
        views.JobeProxyView.as_view(),
        name="jobe_proxy"
    ),
    # Endpoint for recording a user's challenge attempt.
    url(
        r"^save_attempt$",
        views.SaveAttemptView.as_view(),
        name="save_attempt"
    ),
]
| mit | f77310023ebc5849da82fc418d34c5cf | 23.288889 | 114 | 0.534309 | 3.302115 | false | false | false | false |
uccser/cs-unplugged | csunplugged/tests/resources/views/test_index_view.py | 1 | 2179 | from http import HTTPStatus
from django.test import tag
from django.urls import reverse
from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
@tag("resource")
class IndexViewTest(BaseTestWithDB):
    """Tests for the resources application index view."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_resources_index_with_no_resources(self):
        """Index renders successfully with an empty resource list."""
        url = reverse("resources:index")
        response = self.client.get(url)
        # Consistency fix: use HTTPStatus.OK like the sibling tests
        # (previously a bare 200 literal).
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertEqual(len(response.context["all_resources"]), 0)

    def test_resources_index_with_one_resource(self):
        """Index lists a single resource and exposes its thumbnail path."""
        self.test_data.create_resource(
            "resource",
            "Resource",
            "Description",
            "GridResourceGenerator",
        )
        url = reverse("resources:index")
        response = self.client.get(url)
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertQuerysetEqual(
            response.context["all_resources"],
            ["<Resource: Resource>"]
        )
        self.assertEqual(
            response.context["all_resources"][0].thumbnail,
            "/static/img/resources/resource/thumbnails/en/resource-paper_size-a4.png"
        )

    def test_resources_index_with_multiple_resources(self):
        """Index lists all created resources."""
        self.test_data.create_resource(
            "binary-cards",
            "Binary Cards",
            "Description of binary cards",
            "BinaryCardsResourceGenerator",
        )
        self.test_data.create_resource(
            "sorting-network",
            "Sorting Network",
            "Description of sorting network",
            "SortingNetworkResourceGenerator",
        )
        url = reverse("resources:index")
        response = self.client.get(url)
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertQuerysetEqual(
            response.context["all_resources"],
            [
                "<Resource: Binary Cards>",
                "<Resource: Sorting Network>",
            ]
        )
uccser/cs-unplugged | csunplugged/topics/management/commands/_GlossaryTermsLoader.py | 1 | 1862 | """Custom loader for loading glossary terms."""
from os import listdir
from django.db import transaction
from utils.language_utils import get_default_language
from topics.models import GlossaryTerm
from utils.TranslatableModelLoader import TranslatableModelLoader
class GlossaryTermsLoader(TranslatableModelLoader):
    """Custom loader for loading glossary terms."""

    FILE_EXTENSION = ".md"

    @transaction.atomic
    def load(self):
        """Load the glossary content into the database."""
        # Slugs are taken from the Markdown filenames in the default
        # language directory (e.g. "pixel.md" -> "pixel").
        default_language_dir = self.get_localised_dir(get_default_language())
        slugs = {
            filename[:-len(self.FILE_EXTENSION)]
            for filename in listdir(default_language_dir)
            if filename.endswith(self.FILE_EXTENSION)
        }
        for slug in slugs:
            translations = self.get_blank_translation_dictionary()
            content_translations = self.get_markdown_translations("{}.md".format(slug))
            for language, content in content_translations.items():
                translations[language]["definition"] = content.html_string
                translations[language]["term"] = content.title
            glossary_term, created = GlossaryTerm.objects.update_or_create(
                slug=slug,
                defaults={},
            )
            self.populate_translations(glossary_term, translations)
            self.mark_translation_availability(glossary_term, required_fields=["term", "definition"])
            glossary_term.save()
            verb = "Added" if created else "Updated"
            self.log(f"{verb} glossary term: {glossary_term}")
        self.log("All glossary terms loaded!\n")
| mit | 486be25096cd2f0f5887611fbd66db3c | 37 | 101 | 0.647691 | 4.15625 | false | false | false | false |
uccser/cs-unplugged | csunplugged/at_a_distance/management/commands/_LessonLoader.py | 1 | 5661 | """Custom loader for loading an at a distance lesson."""
from django.db import transaction
from utils.TranslatableModelLoader import TranslatableModelLoader
from utils.check_required_files import find_image_files
from utils.errors.CouldNotFindYAMLFileError import CouldNotFindYAMLFileError
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
from utils.errors.InvalidYAMLValueError import InvalidYAMLValueError
from utils.language_utils import (
get_available_languages,
get_default_language,
)
from at_a_distance.models import Lesson, SupportingResource
from at_a_distance.settings import (
AT_A_DISTANCE_INTRODUCTION_FILENAME,
AT_A_DISTANCE_SUPPORTING_RESOURCES_FILENAME,
)
class AtADistanceLessonLoader(TranslatableModelLoader):
    """Custom loader for loading an at a distance lesson."""

    def __init__(self, lesson_number, **kwargs):
        """Create the loader for loading a lesson.

        Args:
            lesson_number: Number of the lesson (int).
        """
        super().__init__(**kwargs)
        self.lesson_number = lesson_number
        self.lesson_slug = self.content_path

    def _get_suitability_value(self, lesson_structure, key, suitability_options):
        """Read and validate one suitability field from the lesson structure.

        Extracted helper so both suitability fields share identical
        validation (previously two duplicated try/except blocks).

        Args:
            lesson_structure: Parsed lesson YAML data (dict).
            key: YAML key to read (str).
            suitability_options: Valid values (list of str).

        Returns:
            The validated value.

        Raises:
            MissingRequiredFieldError: If the key is absent.
            InvalidYAMLValueError: If the value is not a valid option.
        """
        try:
            value = lesson_structure[key]
        except KeyError:
            raise MissingRequiredFieldError(
                self.structure_file_path,
                [
                    key,
                ],
                "Lesson"
            )
        if value not in suitability_options:
            raise InvalidYAMLValueError(
                self.structure_file_path,
                key,
                suitability_options,
            )
        return value

    @transaction.atomic
    def load(self):
        """Load the content for an at a distance lesson.

        Raise:
            MissingRequiredFieldError: when a required field is absent.
            InvalidYAMLValueError: when a suitability value is invalid.
        """
        lesson_structure = self.load_yaml_file(self.structure_file_path)
        lesson_translations = self.get_blank_translation_dictionary()

        icon_path = lesson_structure.get('icon')
        if icon_path:
            # Fails loudly if the referenced icon image is missing.
            find_image_files([icon_path], self.structure_file_path)

        # Suitability values (validated against Lesson.SUITABILITY_CHOICES).
        suitability_options = [i[0] for i in Lesson.SUITABILITY_CHOICES]
        suitable_teaching_students = self._get_suitability_value(
            lesson_structure,
            'suitable-for-teaching-students',
            suitability_options,
        )
        suitable_teaching_educators = self._get_suitability_value(
            lesson_structure,
            'suitable-for-teaching-educators',
            suitability_options,
        )

        # Introduction content; the lesson name comes from the Markdown title.
        content_translations = self.get_markdown_translations(
            AT_A_DISTANCE_INTRODUCTION_FILENAME,
            relative_links_external=True
        )
        for language, content in content_translations.items():
            lesson_translations[language]['name'] = content.title
            lesson_translations[language]['introduction'] = content.html_string

        # Create or update lesson objects and save to the database
        lesson, created = Lesson.objects.update_or_create(
            slug=self.lesson_slug,
            defaults={
                'order_number': self.lesson_number,
                'icon': icon_path,
                'suitable_for_teaching_students': suitable_teaching_students,
                'suitable_for_teaching_educators': suitable_teaching_educators,
            },
        )
        self.populate_translations(lesson, lesson_translations)
        self.mark_translation_availability(lesson, required_fields=['name', 'introduction'])
        lesson.save()

        # Supporting resources are replaced wholesale on every load.
        lesson.supporting_resources.all().delete()
        supporting_resources = lesson_structure.get('supporting-resources')
        if supporting_resources:
            self.add_supporting_resource_translations(lesson)

        if created:
            term = 'Created'
        else:
            term = 'Updated'
        self.log(f'{term} At A Distance Lesson: {lesson}')

    def add_supporting_resource_translations(self, lesson):
        """Create SupportingResource objects for each available language.

        Languages missing a supporting resources file are skipped.

        Args:
            lesson: Lesson the supporting resources belong to.

        Raises:
            CouldNotFindYAMLFileError: If the file is missing for the
                default language.
        """
        for language in get_available_languages():
            yaml_file_path = self.get_localised_file(
                language,
                AT_A_DISTANCE_SUPPORTING_RESOURCES_FILENAME,
            )
            try:
                supporting_resources = self.load_yaml_file(yaml_file_path)
            except CouldNotFindYAMLFileError:
                if language == get_default_language():
                    raise
            else:
                for (index, supporting_resource) in enumerate(supporting_resources):
                    SupportingResource.objects.create(
                        order_number=index,
                        text=supporting_resource['text'],
                        url=supporting_resource['url'],
                        language=language,
                        lesson=lesson,
                    )
| mit | c4d73b9e9913c2d282660dd4fe81dc2f | 36.490066 | 95 | 0.598657 | 4.52518 | false | false | false | false |
uccser/cs-unplugged | csunplugged/topics/migrations/0087_auto_20171122_0324.py | 1 | 10654 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-22 03:24
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11.5 makemigrations):
    # drops the German ("_de") and French ("_fr") translation columns from
    # all translatable topics models, and normalises each model's
    # "languages" ArrayField definition. Edit with care; generated code.

    dependencies = [
        ('topics', '0086_auto_20171108_0840'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='agegroup',
            name='description_de',
        ),
        migrations.RemoveField(
            model_name='agegroup',
            name='description_fr',
        ),
        migrations.RemoveField(
            model_name='classroomresource',
            name='description_de',
        ),
        migrations.RemoveField(
            model_name='classroomresource',
            name='description_fr',
        ),
        migrations.RemoveField(
            model_name='curriculumarea',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='curriculumarea',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='curriculumintegration',
            name='content_de',
        ),
        migrations.RemoveField(
            model_name='curriculumintegration',
            name='content_fr',
        ),
        migrations.RemoveField(
            model_name='curriculumintegration',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='curriculumintegration',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='glossaryterm',
            name='definition_de',
        ),
        migrations.RemoveField(
            model_name='glossaryterm',
            name='definition_fr',
        ),
        migrations.RemoveField(
            model_name='glossaryterm',
            name='term_de',
        ),
        migrations.RemoveField(
            model_name='glossaryterm',
            name='term_fr',
        ),
        migrations.RemoveField(
            model_name='learningoutcome',
            name='text_de',
        ),
        migrations.RemoveField(
            model_name='learningoutcome',
            name='text_fr',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='computational_thinking_links_de',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='computational_thinking_links_fr',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='content_de',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='content_fr',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='heading_tree_de',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='heading_tree_fr',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='programming_challenges_description_de',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='programming_challenges_description_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='content_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='content_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='extra_challenge_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='extra_challenge_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallenge',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallengedifficulty',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallengedifficulty',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='expected_result_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='expected_result_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='hints_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='hints_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='solution_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallengeimplementation',
            name='solution_fr',
        ),
        migrations.RemoveField(
            model_name='programmingchallengelanguage',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='programmingchallengelanguage',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='resourcedescription',
            name='description_de',
        ),
        migrations.RemoveField(
            model_name='resourcedescription',
            name='description_fr',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='content_de',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='content_fr',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='name_fr',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='other_resources_de',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='other_resources_fr',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='computational_thinking_links_de',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='computational_thinking_links_fr',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='content_de',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='content_fr',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='heading_tree_de',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='heading_tree_fr',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='name_de',
        ),
        migrations.RemoveField(
            model_name='unitplan',
            name='name_fr',
        ),
        migrations.AlterField(
            model_name='agegroup',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='classroomresource',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='curriculumarea',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='curriculumintegration',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='glossaryterm',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='learningoutcome',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='programmingchallenge',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='programmingchallengedifficulty',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='programmingchallengeimplementation',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='programmingchallengelanguage',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='resourcedescription',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='topic',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
        migrations.AlterField(
            model_name='unitplan',
            name='languages',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10), default=[], size=None),
        ),
    ]
| mit | 812e369daea1cd2acde5cb6fcd0cd1a3 | 32.503145 | 127 | 0.549559 | 4.614119 | false | false | false | false |
uccser/cs-unplugged | csunplugged/at_a_distance/views.py | 1 | 3641 | """Views for the at a distance application."""
import os.path
from django.views import generic
from django.utils import translation
from django.conf import settings
from django.http import JsonResponse
from django.utils.translation import get_language
from at_a_distance.models import Lesson
from at_a_distance.settings import AT_A_DISTANCE_SLIDE_RESOLUTION
from at_a_distance.utils import get_slide_lengths
class IndexView(generic.ListView):
    """View for the at a distance application homepage."""

    template_name = "at_a_distance/index.html"
    model = Lesson
    # Lessons are exposed to the template as ``lessons``.
    context_object_name = "lessons"
class DeliveryGuideView(generic.TemplateView):
    """View for the delivery guide page."""

    template_name = "at_a_distance/delivery-guide.html"
class LessonView(generic.DetailView):
    """View for a specific lesson."""

    model = Lesson
    template_name = "at_a_distance/lesson.html"
    context_object_name = "lesson"
    slug_url_kwarg = "lesson_slug"

    def get_context_data(self, **kwargs):
        """Provide the context data for the lesson view.

        Adds paths (under the static slides directory for the active
        language) to the slides and speaker-notes PDFs.

        Returns:
            Dictionary of context data.
        """
        context = super().get_context_data(**kwargs)
        slug = self.object.slug
        # Both PDFs live in the same per-language, per-lesson directory.
        slides_directory = os.path.join("slides", get_language(), slug)
        context['slides_pdf'] = os.path.join(slides_directory, f"{slug}-slides.pdf")
        context['notes_pdf'] = os.path.join(slides_directory, f"{slug}-speaker-notes.pdf")
        return context
class LessonSlidesView(generic.DetailView):
    """View for a specific lesson's slides."""

    model = Lesson
    template_name = "at_a_distance/lesson-slides.html"
    context_object_name = "lesson"
    slug_url_kwarg = "lesson_slug"
class LessonFileGenerationView(generic.DetailView):
    """View for generating a specific lesson's files."""

    model = Lesson
    template_name = "at_a_distance/lesson-slides.html"
    context_object_name = "lesson"
    slug_url_kwarg = "lesson_slug"

    def get_context_data(self, **kwargs):
        """Provide the context data for file generation.

        Disables fragments and slide numbers (string flags consumed by
        the slide template) so exported files show complete slides.

        Returns:
            Dictionary of context data.
        """
        context = super().get_context_data(**kwargs)
        context.update({
            'fragments': 'false',
            'slide_number': 'false',
        })
        return context
class LessonSlideSpeakerNotesView(generic.TemplateView):
    """View for speaker notes window."""

    template_name = "at_a_distance/reveal-speaker-notes-plugin/speaker-notes-window.html"
def slides_file_generation_json(request, **kwargs):
    """Provide JSON data for creating thumbnails.

    Args:
        request: The HTTP request. The optional ``language`` GET
            parameter selects a single language, ``all`` selects every
            configured language, and the default is English.

    Returns:
        JSON response is sent containing data for thumbnails.
    """
    requested_language = request.GET.get("language", False)
    if requested_language == "all":
        languages = settings.DEFAULT_LANGUAGES
    elif requested_language:
        languages = [(requested_language, "")]
    else:
        languages = [("en", "")]

    # Map each language code to the lesson slugs available in it.
    lesson_slugs_by_language = dict()
    for language_code, _ in languages:
        with translation.override(language_code):
            lesson_slugs_by_language[language_code] = list(
                Lesson.translated_objects.values_list('slug', flat=True))

    data = {
        "languages": lesson_slugs_by_language,
        "resolution": AT_A_DISTANCE_SLIDE_RESOLUTION,
        "slide_counts": get_slide_lengths(),
    }
    return JsonResponse(data, safe=False)
| mit | 01b4a311b0671be7ab6a83b419424e6e | 28.601626 | 109 | 0.648723 | 3.840717 | false | false | false | false |
jewettaij/moltemplate | moltemplate/lttree_postprocess.py | 1 | 20953 | #!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
# All rights reserved.
"""
lttree_postprocess.py
This is a stand-alone python script which checks the files created by
lttree.py to insure that the standard instance-variables ($variables)
have all been defined. This script performs a task which is very similar
to the task performed by lttree_check.py. This script attempts to detect
mistakes in the names of $atom, $bond, $angle, $dihedral, $improper, & $mol
variables.
"""
import sys
try:
from .lttree_styles import *
from .ttree_lex import ExtractCatName, SplitQuotedString
except (ImportError, SystemError, ValueError):
# not installed as a package
from lttree_styles import *
from ttree_lex import ExtractCatName, SplitQuotedString
# Program metadata used in status and error messages.
g_program_name = __file__.split('/')[-1]  # = 'lttree_postprocess.py'
g_version_str = '0.6.2'
g_date_str = '2021-4-20'
# Appended to every error message.  (This program only performs sanity
# checks; the checks can be disabled entirely by the user.)
g_no_check_msg = \
    '(To override this error, run moltemplate using the \"-nocheck\" argument.)\n'


def _tokenize_template_line(line_orig):
    """Discard the '#' comment (if any) from one line of a .template file
    and split the remainder into tokens.  Curly brackets act as quotes so
    that variable names containing spaces survive as single tokens."""
    ic = line_orig.find('#')
    if ic != -1:
        line = line_orig[:ic]
    else:
        line = line_orig.rstrip('\n')
    return SplitQuotedString(line,
                             quotes='{',
                             endquote='}')


def _read_defined_ids(section_name, min_num_tokens, require_exact=False):
    """Return the set of IDs appearing in the first column of every
    non-blank line of the file "section_name.template".

    These sections are optional, so an empty set is returned when the file
    cannot be opened.  An InputError is raised when a line has fewer than
    min_num_tokens columns (or a different number, when require_exact).
    (NOTE: formerly a bare "except" swallowed such format errors --and any
    other exception-- for the optional sections, and could leak the open
    file handle.  Now only the failure to open the file is tolerated.)"""
    defined_ids = set([])
    try:
        f = open(section_name + '.template', 'r')
    except IOError:
        return defined_ids          # <-- this section is optional
    try:
        for line_orig in f:
            tokens = _tokenize_template_line(line_orig)
            if len(tokens) == 0:
                continue
            if ((len(tokens) != min_num_tokens) if require_exact
                    else (len(tokens) < min_num_tokens)):
                raise InputError('Error(' + g_program_name + '): The following line from\n'
                                 '      "\"' + section_name + '.template\" has bad format:\n\n'
                                 + line_orig + '\n'
                                 '       This might be an internal error. (Feel free to contact the developer.)\n'
                                 + g_no_check_msg + '\n')
            defined_ids.add(tokens[0])
    finally:
        f.close()
    return defined_ids


def main():
    """Check the variable bindings in "ttree_assignments.txt" (created by
    ttree/moltemplate) for references to atoms, bonds, angles, dihedrals,
    impropers, or atom types (masses) which were never actually defined in
    the corresponding "Data ..." template files.  Exits with status 0 when
    no errors were detected, and 1 otherwise."""
    atom_style = 'full'
    ttree_assignments_fname = 'ttree_assignments.txt'

    # ---- command-line argument parsing ----
    for i in range(0, len(sys.argv)):
        if sys.argv[i].lower() in ('-atomstyle', '-atom-style', '-atom_style'):
            if i + 1 >= len(sys.argv):
                raise InputError('Error(' + g_program_name + '): The ' + sys.argv[i] + ' flag should be followed by a LAMMPS\n'
                                 '       atom_style name (or single quoted string containing a space-separated\n'
                                 '       list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
            atom_style = sys.argv[i + 1]
        elif sys.argv[i].lower() in ('-ttreeassignments',
                                     '-ttree-assignments',
                                     '-ttree_assignments'):
            if i + 1 >= len(sys.argv):
                raise InputError('Error(' + g_program_name + '): The ' + sys.argv[i] + ' flag should be followed by \n'
                                 '       a file containing the variable bindings created by ttree/moltemplate.\n')
            ttree_assignments_fname = sys.argv[i + 1]
        else:
            pass  # ignore other arguments (they are intended for lttree.py)

    # Figure out which columns of the "Data Atoms" section store the
    # atom-ID and (optionally) the molecule-ID.  (Depends on atom_style.)
    atom_column_names = AtomStyle2ColNames(atom_style)
    i_atomid = 0
    i_molid = -1
    for i in range(0, len(atom_column_names)):
        if atom_column_names[i].lower() == 'atom-id':
            i_atomid = i
        elif atom_column_names[i].lower() == 'molecule-id':
            i_molid = i
    i_max_column = max(i_atomid, i_molid)

    sys.stderr.write(g_program_name + ' v' +
                     g_version_str + ' ' + g_date_str + '\n')

    try:
        # ---------- defined_atoms / defined_mols (required) ----------
        defined_atoms = set([])
        defined_mols = set([])
        try:
            f = open(data_atoms + '.template', 'r')
        except IOError:
            raise InputError('Error(' + g_program_name + '): Unable to open file\n' +
                             '      \"' + data_atoms + '.template\"\n'
                             '      for reading.  (Do your files lack a \"' +
                             data_atoms + '\" section?)\n'
                             + g_no_check_msg + '\n')
        try:
            for line_orig in f:
                tokens = _tokenize_template_line(line_orig)
                if len(tokens) == 0:
                    continue
                if len(tokens) <= i_max_column:
                    raise InputError('Error(' + g_program_name + '): The following line from\n'
                                     '      "\"' + data_atoms + '.template\" has bad format:\n\n'
                                     + line_orig + '\n'
                                     '       This might be an internal error. (Feel free to contact the developer.)\n'
                                     + g_no_check_msg + '\n')
                defined_atoms.add(tokens[i_atomid])
                if i_molid != -1:
                    defined_mols.add(tokens[i_molid])
        finally:
            f.close()

        # -------- optional sections (the ID is in the first column) --------
        defined_bonds = _read_defined_ids(data_bonds, 4)
        defined_angles = _read_defined_ids(data_angles, 5)
        defined_dihedrals = _read_defined_ids(data_dihedrals, 6)
        defined_impropers = _read_defined_ids(data_impropers, 6)
        defined_masses = _read_defined_ids(data_masses, 2, require_exact=True)

        # For each category of instance ($) variable: the set of defined IDs
        # and the explanation appended to its error message.
        instance_checks = {
            'atom': (defined_atoms,
                     '    (This $atom was not found in the "Data Atoms" sections in your LT files.\n'
                     '     If this atom belongs to a molecule (or other subunit), make sure that\n'
                     '     you specified the correct path which leads to it (using / and ..))\n\n'),
            'bond': (defined_bonds,
                     '    (This $bond was not found in either the "Data Bonds" sections,\n'
                     '     or the "Data Bond List" sections of any of your LT files.\n'
                     '     If this bond belongs to a molecule (or other subunit), make sure that\n'
                     '     you specified the correct path which leads to it (using / and ..))\n\n'),
            'angle': (defined_angles,
                      '    (This $angle was not found in the "Data Angles" sections in your LT files\n'
                      '     If this angle belongs to a molecule (or other subunit), make sure that\n'
                      '     you specified the correct path which leads to it (using / and ..)\n'
                      '     It is also possible that you have misnamed the "Data Angles" section.)\n\n'),
            'dihedral': (defined_dihedrals,
                         '    (This dihedral was not found in the "Data Dihedrals" sections in your files\n'
                         '     If this dihedral belongs to a molecule (or other subunit), make sure that\n'
                         '     you specified the correct path which leads to it (using / and ..)\n'
                         '     It is also possible that you have misnamed the "Data Dihedrals" section.)\n\n'),
            'improper': (defined_impropers,
                         '    (This improper was not found in the "Data Impropers" sections in your files\n'
                         '     If this improper belongs to a molecule (or other subunit), make sure that\n'
                         '     you specified the correct path which leads to it (using / and ..)\n'
                         '     It is also possible that you have misnamed the "Data Impropers" section.)\n\n'),
        }
        # (NOTE: $mol (molecule-ID) variables are deliberately NOT checked.
        #  LAMMPS does not care if some molecule-ID numbers are never used.)

        # ---- Read ttree_assignments and check each variable reference ----
        # The file contains one variable name per line followed by the
        # number assigned to it, for example:
        #    @/atom:MoleculeType1:C 1     <-- static (type) variables
        #    $/atom:molecule1:N1 1        <-- instance variables
        try:
            f = open(ttree_assignments_fname, 'r')
        except IOError:
            raise InputError('Error(' + g_program_name + '): Unable to open file\n' +
                             '      \"' + ttree_assignments_fname + '\"\n'
                             '      for reading.\n'
                             + g_no_check_msg + '\n')
        try:
            for line_orig in f:
                ic = line_orig.find('#')
                if ic != -1:
                    line = line_orig[:ic]
                    # the comment tells us where this variable was used
                    usage_location_str = 'near ' + line_orig[ic + 1:]
                else:
                    line = line_orig.rstrip('\n')
                    usage_location_str = ''
                tokens = SplitQuotedString(line,
                                           quotes='{',
                                           endquote='}')
                if len(tokens) == 0:
                    continue

                # Instance variables use the '$' prefix:
                i_prefix = tokens[0].find('$')
                if i_prefix != -1:
                    cat_name = ExtractCatName(tokens[0][i_prefix + 1:])
                    if cat_name in instance_checks:
                        defined_ids, explanation = instance_checks[cat_name]
                        if tokens[0] not in defined_ids:
                            raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
                                             '    Reference to undefined $' + cat_name + ':\n\n' +
                                             '     ' + tokens[0] + '   (<--full name)\n\n' +
                                             explanation + g_no_check_msg)

                # Static (type counter) variables use the '@' prefix:
                i_prefix = tokens[0].find('@')
                if i_prefix != -1:
                    cat_name = ExtractCatName(tokens[0][i_prefix + 1:])
                    # Complain about @atom types whose mass was never set
                    # (but only if the user defined any masses at all).
                    if ((cat_name == 'atom') and (len(defined_masses) > 0) and
                            (tokens[0] not in defined_masses)):
                        raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
                                         '    A reference to an @atom: of type:\n'
                                         '       ' + tokens[0] + '   (<--full type name)\n\n'
                                         '    ...was found, however its mass was never defined.\n'
                                         '    (Make sure that there is a "write_once("Data Masses"){" section in one\n'
                                         '     of your LT files which defines the mass of this atom type.  If the\n'
                                         '     atom type name contains "/", then make sure the path is correct.)\n\n' +
                                         g_no_check_msg)
        finally:
            f.close()

        sys.stderr.write(g_program_name + ': -- No errors detected. --\n')
        exit(0)

    except (ValueError, InputError) as err:
        sys.stderr.write('\n' + str(err) + '\n')
        sys.exit(1)

    return
if __name__ == '__main__':
    main()  # run the checker only when executed as a script (not on import)
| mit | 28472614c94242fd5fdd9b190a23e343 | 46.947368 | 131 | 0.422231 | 4.504084 | false | false | false | false |
jewettaij/moltemplate | moltemplate/bonds_by_type.py | 1 | 16436 | #!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
"""
bonds_by_type.py reads a LAMMPS data file (or an excerpt of a LAMMPS
data file) containing bonded many-body interactions by atom type
(and bond type), and generates a list of additional interactions
in LAMMPS format consistent with those type (to the standard out).
Typical Usage:
bonds_by_type.py -atoms atoms.data \\
-bonds bonds.data \\
-bondsbytype bonds_by_type.data \\
> new_bonds.data
"""
# Name, release date, and version of this program (printed in the banner
# written to the standard error stream inside main(), below).
g_program_name = __file__.split('/')[-1]  # = 'bonds_by_type.py'
g_date_str = '2020-11-04'
g_version_str = '0.13.0'
import sys
try:
from . import ttree_lex
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
except (ImportError, SystemError, ValueError):
# not installed as a package
import ttree_lex
from lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
import re
def LookupBondTypes(bond_types,
                    bond_ids,
                    bond_pairs,
                    lines_atoms,
                    lines_bonds,
                    lines_bondsbytype,
                    atom_style,
                    section_name,
                    prefix='',
                    suffix='',
                    bond_ids_offset=0):
    """
    Look up the type of every bond and fill the (initially emptied)
    "bond_types", "bond_ids", and "bond_pairs" lists with one entry each
    per bond.

    Input:
       lines_atoms        lines from the "Data Atoms" section (these supply
                          the type of each atom)
       lines_bonds        lines from either the "Data Bond List" section
                          (3 columns: bond-id atom-id atom-id) or the
                          "Data Bonds AtomId AtomId" section (2 columns)
       lines_bondsbytype  lines from the "Data Bonds By Type" section
                          (3 columns: bond-type atom-type atom-type, where
                          the atom types may contain regular expressions)
       atom_style         determines which columns store atom-id/atom-type
       section_name       either "Data Bond List" or
                          "Data Bonds AtomId AtomId"

    When bond ids are not supplied by the user (2-column input), they are
    generated in the form:
       prefix + str(number) + suffix
    (where "number" begins at bond_ids_offset+1)

    Raises ttree_lex.InputError when a bonded atom was never defined, or
    when no bond type matches a bonded pair of atom types.
    """
    column_names = AtomStyle2ColNames(atom_style)
    i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)

    # ---- Build a lookup table mapping each atom-id to its atom-type ----
    atomids = []
    atomtypes = []
    atomids2types = {}
    for iv in range(0, len(lines_atoms)):
        line = lines_atoms[iv].strip()
        if '#' in line:
            icomment = line.find('#')
            line = (line[:icomment]).strip()
        if len(line) > 0:
            tokens = ttree_lex.SplitQuotedString(line)
            if ((len(tokens) <= i_atomid) or (len(tokens) <= i_atomtype)):
                sys.stderr.write("\"" + line + "\"\n")
                raise(ttree_lex.InputError(
                    'Error not enough columns on line ' + str(iv + 1) + ' of \"Atoms\" section.'))
            tokens = ttree_lex.SplitQuotedString(line)
            atomid = ttree_lex.EscCharStrToChar(tokens[i_atomid])
            atomids.append(atomid)
            atomtype = ttree_lex.EscCharStrToChar(tokens[i_atomtype])
            atomtypes.append(atomtype)
            atomids2types[atomid] = atomtype

    # ---- Read the list of bonds (erasing any previous list contents) ----
    assert(isinstance(bond_ids, list))
    assert(isinstance(bond_types, list))
    assert(isinstance(bond_pairs, list))
    del bond_ids[:]
    del bond_types[:]
    del bond_pairs[:]

    for ie in range(0, len(lines_bonds)):

        line = lines_bonds[ie].strip()
        if '#' in line:
            icomment = line.find('#')
            line = (line[:icomment]).strip()
        if len(line) == 0:
            continue

        tokens = ttree_lex.SplitQuotedString(line)

        if section_name == "Data Bonds AtomId AtomId":
            # 2 columns: atom-id atom-id   (bond-ids are generated here)
            if len(tokens) == 2:
                bondid_n = bond_ids_offset + len(bond_ids) + 1
                bond_ids.append(prefix + str(bondid_n) + suffix)
                bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[0]),
                                   ttree_lex.EscCharStrToChar(tokens[1])))
            else:
                raise(ttree_lex.InputError('Incorrect number of columns on line ' +
                                           str(ie + 1) + ' of \"' + section_name + '\" section.'))
        elif section_name == "Data Bond List":
            # 3 columns: bond-id atom-id atom-id
            if len(tokens) == 3:
                bond_ids.append(ttree_lex.EscCharStrToChar(tokens[0]))
                bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[1]),
                                   ttree_lex.EscCharStrToChar(tokens[2])))
            else:
                raise(ttree_lex.InputError('Incorrect number of columns on line ' +
                                           str(ie + 1) + ' of \"' + section_name + '\" section.'))
        else:
            raise(ttree_lex.InputError('Internal Error (' + g_program_name +
                                       '): Unknown section name: \"' + section_name + '\"'))

    assert(len(bond_types) == 0)

    # ---- Parse the "Bonds By Type" rules ----
    typepattern_to_coefftypes = []
    for i in range(0, len(lines_bondsbytype)):
        line = lines_bondsbytype[i].strip()
        if '#' in line:
            icomment = line.find('#')
            line = (line[:icomment]).strip()
        if len(line) > 0:
            tokens = ttree_lex.SplitQuotedString(line)
            if (len(tokens) != 3):
                raise(ttree_lex.InputError('Error: Wrong number of columns in the \"Bonds By Type\" section of data file.\n'
                                           'Offending line:\n' +
                                           '\"' + line + '\"\n'
                                           'Expected 3 columns\n'))
            coefftype = ttree_lex.EscCharStrToChar(tokens[0])
            typepattern = []
            for typestr in tokens[1:]:
                if ttree_lex.HasRE(typestr):
                    # BUGFIX: VarNameToRegex() is defined in the ttree_lex
                    # module.  (The former bare reference raised a NameError
                    # whenever an atom type pattern contained a regular
                    # expression.)
                    regex_str = ttree_lex.VarNameToRegex(typestr)
                    typepattern.append(re.compile(regex_str))
                else:
                    typepattern.append(ttree_lex.EscCharStrToChar(typestr))
            typepattern_to_coefftypes.append([typepattern, coefftype])

    # ---- Look up the type of each bond ----
    assert(len(bond_ids) == len(bond_pairs))
    for ie in range(0, len(bond_ids)):
        bond_types.append(None)

    for ie in range(0, len(bond_ids)):
        bondid = bond_ids[ie]
        (atomid1, atomid2) = bond_pairs[ie]
        if atomid1 not in atomids2types:
            raise ttree_lex.InputError('Error: atom \"' + atomid1 + '\" not defined in \"Data Atoms\".\n'
                                       '       This usually happens when the user mistypes one of the names of the\n'
                                       '       $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       '       To find out where the mistake occured, search the \n'
                                       '       \"ttree_assignments.txt\" file for:\n'
                                       '       \"' + atomid1 + '\"\n')
        if atomid2 not in atomids2types:
            raise ttree_lex.InputError('Error: atom \"' + atomid2 + '\" not defined in \"Data Atoms\".\n'
                                       '       This usually happens when the user mistypes one of the names of the\n'
                                       '       $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       '       To find out where the mistake occured, search the \n'
                                       '       \"ttree_assignments.txt\" file for:\n'
                                       '       \"' + atomid2 + '\"\n')
        atomtype1 = atomids2types[atomid1]
        atomtype2 = atomids2types[atomid2]

        for typepattern, coefftype in typepattern_to_coefftypes:
            # use string comparisons to check if atom types match the pattern
            # (the pair is tried in both orders, since bonds are symmetric)
            if (ttree_lex.MatchesAll((atomtype1, atomtype2), typepattern) or
                    ttree_lex.MatchesAll((atomtype2, atomtype1), typepattern)):
                # ("MatchesAll()" defined in "ttree_lex.py")
                bond_types[ie] = coefftype

    for ie in range(0, len(bond_ids)):
        if not bond_types[ie]:
            (atomid1, atomid2) = bond_pairs[ie]
            atomtype1 = atomids2types[atomid1]
            atomtype2 = atomids2types[atomid2]
            raise ttree_lex.InputError('Error: No bond types defined for the bond between\n'
                                       '       atoms ' + atomid1 +
                                       ' (type ' + atomtype1 + ')\n'
                                       '       and ' + atomid2 + ' (type ' + atomtype2 + ')\n'
                                       '\n'
                                       '   (If you are using a force field, then it probably means that you made a\n'
                                       '    mistake choosing at least one of these two @atom types from the list\n'
                                       '    of available atom types supplied by the force field.  To fix it, edit\n'
                                       '    the corresponding lines in the "Data Atoms" section of your LT file.)\n')
def main():
    """Parse the command line arguments, read the data-file fragments
    supplied by the user (-atoms, -bond-list/-bonds, -bondsbytype), look up
    the type of every bond, and print the complete bond list
    ("id type atomid1 atomid2" on each line) to the standard output."""
    sys.stderr.write(g_program_name + ' v' +
                     g_version_str + ' ' + g_date_str + ' ')
    if sys.version < '3':
        sys.stderr.write(' (python version < 3)\n')
    else:
        sys.stderr.write('\n')

    try:
        fname_atoms = None
        fname_bond_list = None
        fname_bondsbytype = None
        section_name = 'Data Bond List'  # (This will be replaced later.)
        atom_style = 'full'
        prefix = ''
        suffix = ''

        argv = [arg for arg in sys.argv]

        # Loop over the command line arguments.  (Arguments which are
        # recognized here are deleted from the local copy of argv.)
        i = 1
        while i < len(argv):
            #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
            if argv[i].lower() in ('-?', '--?', '-help', '--help'):
                # BUGFIX: formerly help was printed only when this flag was
                # the last argument (otherwise the loop spun forever because
                # "i" was never incremented), and it referenced an undefined
                # "man_page_text" variable.  Print the module docstring.
                sys.stdout.write(__doc__ + '\n')
                sys.exit(0)
            elif argv[i].lower() == '-atoms':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
                                               '       text which might appear in the "Atoms" section of a LAMMPS data file.\n')
                fname_atoms = argv[i + 1]
                del(argv[i:i + 2])
            elif argv[i].lower() == '-bonds':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
                                               '       text which might appear in the "Bonds" section of a LAMMPS data file.\n')
                fname_bond_list = argv[i + 1]
                del(argv[i:i + 2])
            elif argv[i].lower() == '-bond-list':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError(
                        'Error: ' + argv[i] + ' flag should be followed by a file name\n')
                fname_bond_list = argv[i + 1]
                section_name = "Data Bond List"
                del(argv[i:i + 2])
            elif argv[i].lower() == '-bondsbytype':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError(
                        'Error: ' + argv[i] + ' flag should be followed by a file name\n')
                fname_bondsbytype = argv[i + 1]
                del(argv[i:i + 2])
            elif ((argv[i].lower() == '-atom-style') or
                  (argv[i].lower() == '-atom_style')):
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a an atom_style name.\n'
                                               '       (Or single quoted string which includes a space-separated\n'
                                               '       list of column names.)\n')
                atom_style = argv[i + 1]
                del(argv[i:i + 2])
            elif argv[i].lower() == '-prefix':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a prefix string\n'
                                               '       (a string you want to appear to the left of the integer\n'
                                               '        which counts the bonded interactions you have generated.)\n')
                prefix = argv[i + 1]
                del(argv[i:i + 2])
            elif argv[i].lower() == '-suffix':
                if i + 1 >= len(argv):
                    raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a suffix string\n'
                                               '       (a string you want to appear to the right of the integer\n'
                                               '        which counts the bonded interactions you have generated.)\n')
                suffix = argv[i + 1]  # BUGFIX: formerly this overwrote "prefix"
                del(argv[i:i + 2])
            elif argv[i][0] == '-':
                raise ttree_lex.InputError('Error(' + g_program_name + '):\n'
                                           'Unrecogized command line argument \"' + argv[i] + '\"\n')
            else:
                i += 1

        if len(argv) != 1:
            # if there are any remaining arguments we did not recognize,
            problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
            raise ttree_lex.InputError('Syntax Error(' + g_program_name + '):\n\n'
                                       '       Problem with argument list.\n'
                                       '       The remaining arguments are:\n\n'
                                       '         ' +
                                       (' '.join(problem_args)) + '\n\n'
                                       '       (The actual problem may be earlier in the argument list.)\n')

        # All three input files are mandatory.  (Formerly a missing flag
        # caused a confusing "open(None)" TypeError further below.)
        if ((fname_atoms is None) or
                (fname_bond_list is None) or
                (fname_bondsbytype is None)):
            raise ttree_lex.InputError('Error(' + g_program_name + '):\n'
                                       '       You must supply the -atoms, -bond-list (or -bonds), and\n'
                                       '       -bondsbytype arguments, each followed by a file name.\n')

        bond_types = []
        bond_ids = []
        bond_pairs = []

        # ("with" guarantees the files are closed, even if an error occurs)
        with open(fname_atoms, 'r') as fatoms:
            lines_atoms = fatoms.readlines()
        with open(fname_bond_list, 'r') as fbonds:
            lines_bonds = fbonds.readlines()
        with open(fname_bondsbytype, 'r') as fbondsbytype:
            lines_bondsbytype = fbondsbytype.readlines()

        LookupBondTypes(bond_types,
                        bond_ids,
                        bond_pairs,
                        lines_atoms,
                        lines_bonds,
                        lines_bondsbytype,
                        atom_style,
                        section_name,
                        prefix=prefix,   # BUGFIX: the prefix/suffix parsed
                        suffix=suffix)   # above were formerly discarded

        assert(len(bond_types) == len(bond_ids) == len(bond_pairs))

        for ie in range(0, len(bond_types)):
            sys.stdout.write(bond_ids[ie] + ' ' +
                             bond_types[ie] + ' ' +
                             bond_pairs[ie][0] + ' ' +
                             bond_pairs[ie][1] + '\n')

    except (ValueError, ttree_lex.InputError) as err:
        sys.stderr.write('\n' + str(err) + '\n')
        sys.exit(-1)

    return
if __name__ == "__main__":
    main()  # run only when executed as a stand-alone script, not on import
| mit | fb027005de05101961dc5764b5ee933c | 42.366755 | 132 | 0.48047 | 4.064293 | false | false | false | false |
jewettaij/moltemplate | examples/coarse_grained/DNA_models/dsDNA_only/2strands/3bp_2particles/simple_dna_example/measure_torsional_persistence_length/raw2blockaverage.py | 4 | 2902 | #!/usr/bin/env python
err_msg = """
Typical Usage:
raw2blockaverage.py N [scale_inv] < coordinate_file
Coordinates read from the file coordinate_file are averaged in blocks
of size N, and printed to the standard output, followed by a blank line.
Excluding blank lines, the number of lines in the output equals the number
of lines in the input divided by N. If blank lines are present, then
the coordinates read from the file are assumed to represent independent
snapshots from a trajectory (animation). In this case, the block-averaging
is done repeatedly for each frame in the animation, and a new trajectory
file is written (containing blank line delimters between frames).
The optional "scale_inv" argument allows you to divide the
all of resulting averaged coordinates by the number scale_inv.
(Typically, N and scale_inv, if present, are equal to each other.)
Example:
raw2blockaverage.py 2 < coords.raw > coords_ave2.raw
raw2blockaverage.py 3 3 < coords.raw > coords_ave3_normalized.raw
"""
import sys
from math import *
#import numpy as np
def ProcessStructure(x_id, n_ave, scale):
    """Average consecutive blocks of n_ave coordinate rows from x_id,
    multiply each averaged coordinate by "scale", and print one line per
    block (space-separated columns) to the standard output.

    Any trailing rows which do not fill a complete block are discarded.
    """
    D = len(x_id[0])      # number of columns (dimensions) per point
    n_orig = len(x_id)
    # BUGFIX: use integer (floor) division.  In Python 3 the old
    # "n_orig/n_ave" produced a float, and range() raised a TypeError.
    for i in range(0, n_orig // n_ave):
        xave_d = [0.0 for d in range(0, D)]
        for j in range(0, n_ave):
            for d in range(0, D):
                xave_d[d] += x_id[n_ave * i + j][d]
        for d in range(0, D):
            xave_d[d] *= scale / float(n_ave)
        sys.stdout.write(str(xave_d[0]))
        for d in range(1, D):
            sys.stdout.write(' ' + str(xave_d[d]))
        sys.stdout.write('\n')
# Parse the argument list:
if len(sys.argv) <= 1:
    sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
    exit(1)
n_ave = int(sys.argv[1])          # block size (number of lines to average)
scale = 1.0
if len(sys.argv) > 2:
    scale = 1.0 / float(sys.argv[2])   # optional normalization factor

# Now read the input file:
# Frames of the trajectory are separated by blank (or comment-only) lines.
# The coordinates of the current frame accumulate in x_id.
x_id = []
count_structs = 0
is_new_structure = True
interpret_blank_lines_as_new_structures = True
in_file = sys.stdin
for line_orig in in_file:
    ic = line_orig.find('#')
    if ic != -1:
        line = line_orig[:ic]
    else:
        line = line_orig.rstrip('\n')
    tokens = line.strip().split()
    if len(tokens) == 0:
        if (interpret_blank_lines_as_new_structures and
                (len(x_id) > 0)):
            # blank (or comment) lines signal the next frame of animation
            ProcessStructure(x_id, n_ave, scale)
            sys.stdout.write('\n')
            x_id = []
            count_structs += 1
            is_new_structure = True
        continue  # skip blank lines or comments
    elif is_new_structure:
        is_new_structure = False
    # x_d contains the coordinates read from the most recent line.
    # BUGFIX: list() is required here.  In Python 3, map() returns a lazy
    # iterator, and len()/indexing on it failed inside ProcessStructure().
    # (The sibling script raw2subtractlines.py already uses list(map(...)).)
    x_d = list(map(float, tokens))
    x_id.append(x_d)

# Don't forget the final frame (the input need not end with a blank line):
if len(x_id) > 0:
    ProcessStructure(x_id, n_ave, scale)
| mit | 09e32bb98ff551931d2348fca64e9d08 | 27.174757 | 78 | 0.620951 | 3.301479 | false | false | false | false |
jewettaij/moltemplate | moltemplate/extract_espresso_atom_types.py | 2 | 1114 | #!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013, Regents of the University of California
import sys
def main():
    """Scan an ESPResSo/TCL script on the standard input and, for every
    complete "part" command (one containing the "part", "type", and "pos"
    keywords with enough arguments), print the atom id and atom type
    (separated by a space) on one line of the standard output."""
    for raw_line in sys.stdin:
        # Discard everything following a '#' comment character.
        icomment = raw_line.find('#')
        if icomment != -1:
            content = raw_line[:icomment]
        else:
            content = raw_line.rstrip('\n')
        words = content.strip().split()
        if len(words) <= 2:
            continue  # too short to contain "part ... type ... pos ..."
        atom_id = -1
        atom_type = -1
        pos_found = False
        num_words = len(words)
        for j, word in enumerate(words):
            if (word == 'part') and (j + 1 < num_words):
                atom_id = words[j + 1]
            elif (word == 'type') and (j + 1 < num_words):
                atom_type = words[j + 1]
            elif (word == 'pos') and (j + 2 < num_words):
                pos_found = True
        if (atom_id != -1) and (atom_type != -1) and pos_found:
            sys.stdout.write(atom_id + ' ' + atom_type + '\n')
if __name__ == "__main__":
    main()  # run only when executed as a stand-alone script, not on import
| mit | db09a60c48b686cac69041dda8cf71f4 | 31.764706 | 67 | 0.477558 | 3.427692 | false | false | false | false |
jewettaij/moltemplate | examples/coarse_grained/DNA_models/dsDNA_only/2strands/3bp_2particles/simple_dna_example/measure_torsional_persistence_length/raw2subtractlines.py | 4 | 3200 | #!/usr/bin/env python
err_msg = """
Typical Usage:
raw2subtractlines.py [-norm] < coordinate_file
Coordinates read from one line of the file are subtracted from coordinates
from the next line of the file (if it contains coordinates) and printed to
the standard output. Blank lines in the input file are copied to the
standard out. Each block of N lines of text containing M columns in the
input file produces a block of N-1 lines of text (containing M columns)
in the output file.
The optional "-norm" argument allows you to normalize the resulting vectors
after they have been subtracted.
Examples:
raw2subtractlines.py < coord_bead_chain.raw > coords_bond_vector.raw
raw2subtractlines.py -norm < coord_bead_chain.raw > coords_bond_direction.raw
"""
import sys
from math import *
#import numpy as np
def ProcessStructure(x_id, normalize=False):
    """For each pair of consecutive coordinate rows in x_id, print their
    difference (row[i+1] - row[i]) as one space-separated line on the
    standard output.  N input rows produce N-1 output lines.

    If "normalize" is True, each difference vector is divided by its
    Euclidean length before printing.
    """
    D = len(x_id[0])      # number of columns (dimensions) per point
    N = len(x_id)
    for i in range(0, N - 1):
        # BUGFIX: the difference vector is now computed exactly once per
        # pair of rows.  (Formerly a redundant "for d in range(0, D):"
        # loop recomputed the identical list comprehension D times,
        # because the comprehension's own "d" shadowed the loop variable.)
        x_diff = [x_id[i + 1][d] - x_id[i][d] for d in range(0, D)]
        if normalize:
            x_diff_len = sqrt(sum(xd * xd for xd in x_diff))
            for d in range(0, D):
                x_diff[d] /= x_diff_len
        sys.stdout.write(str(x_diff[0]))
        for d in range(1, D):
            sys.stdout.write(' ' + str(x_diff[d]))
        sys.stdout.write('\n')
# ---- command line handling ----
# At most one optional argument is accepted: a help flag or a "-norm" flag.
if (len(sys.argv) > 2):
    sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
    exit(1)
if ((len(sys.argv) == 2) and
        ((sys.argv[1] == '-h') or
         (sys.argv[1] == '-?') or
         (sys.argv[1] == '--help'))):
    sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
    exit(1)

normalize = False
if (len(sys.argv) == 2):
    if ((sys.argv[1] == '-n') or
            (sys.argv[1] == '-norm') or
            (sys.argv[1] == '-normalize')):
        normalize = True
    else:
        sys.stderr.write("Error: Unrecognized command line argument:\n"
                         "       \""+sys.argv[1]+"\"\n")
        exit(1)

# ---- read the coordinates from the standard input ----
# Frames of the trajectory are separated by blank (or comment-only) lines.
# Each frame accumulates in frame_coords and is processed once complete.
frame_coords = []         # coordinates belonging to the current frame
num_frames_done = 0       # how many complete frames were processed so far
starting_new_frame = True

for raw_line in sys.stdin:
    icomment = raw_line.find('#')
    if icomment != -1:
        content = raw_line[:icomment]
    else:
        content = raw_line.rstrip('\n')
    fields = content.strip().split()
    if len(fields) == 0:
        # A blank line marks the end of the current frame (if any).
        if len(frame_coords) > 0:
            ProcessStructure(frame_coords, normalize)
            sys.stdout.write('\n')
            frame_coords = []
            num_frames_done += 1
            starting_new_frame = True
        continue  # skip blank lines or comments
    if starting_new_frame:
        starting_new_frame = False
    # Append the coordinates read from this line to the current frame.
    frame_coords.append(list(map(float, fields)))

# Don't forget the final frame (the input need not end with a blank line):
if len(frame_coords) > 0:
    ProcessStructure(frame_coords, normalize)
| mit | 00919cd0882ef8532aebf3ea7a201ab4 | 26.350427 | 80 | 0.575938 | 3.255341 | false | false | false | false |
jewettaij/moltemplate | moltemplate/lttree.py | 1 | 47134 | #!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.moltemplate.org
# http://www.chem.ucsb.edu/~sheagroup
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
"""
lttree.py
lttree.py is an extension of the generic ttree.py program.
This version can understand and manipulate ttree-style templates which
are specialized for storing molecule-specific data for use in LAMMPS.
The main difference between lttree.py and ttree.py is:
Unlike ttree.py, lttree.py understands rigid-body movement commands like
"rot()" and "move()" which allows it to reorient and move each copy
of a molecule to a new location. (ttree.py just ignores these commands.
Consequently LAMMPS input file (fragments) created with ttree.py have
invalid (overlapping) atomic coordinates and must be modified or augmented
later (by loading atomic coordinates from a PDB file or an XYZ file).
lttree.py understands the "Data Atoms" section of a LAMMPS
data file (in addition to the various "atom_styles" which effect it).
Additional LAMMPS-specific features may be added in the future.
"""
# Name, release date, and version of this program (reported in messages).
g_program_name = __file__.split('/')[-1]  # ='lttree.py'
g_date_str = '2022-6-05'
g_version_str = '0.80.4'
import sys
from collections import defaultdict
import pkg_resources
try:
from .ttree import BasicUISettings, BasicUIParseArgs, EraseTemplateFiles, \
StackableCommand, PopCommand, PopRightCommand, PopLeftCommand, \
PushCommand, PushLeftCommand, PushRightCommand, ScopeCommand, \
WriteVarBindingsFile, StaticObj, InstanceObj, \
BasicUI, ScopeBegin, ScopeEnd, WriteFileCommand, Render
from .ttree_lex import InputError, TextBlock, DeleteLinesWithBadVars, \
TemplateLexer, TableFromTemplate, VarRef, TextBlock, ErrorLeader, \
SplitQuotedString
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid, \
ColNames2Coords, ColNames2Vects, \
data_atoms, data_prefix, data_masses, \
data_velocities, data_ellipsoids, data_triangles, data_lines, \
data_pair_coeffs, data_bond_coeffs, data_angle_coeffs, \
data_dihedral_coeffs, data_improper_coeffs, data_bondbond_coeffs, \
data_bondangle_coeffs, data_middlebondtorsion_coeffs, \
data_endbondtorsion_coeffs, data_angletorsion_coeffs, \
data_angleangletorsion_coeffs, data_bondbond13_coeffs, \
data_angleangle_coeffs, data_bonds_by_type, data_angles_by_type, \
data_dihedrals_by_type, data_impropers_by_type, \
data_bonds, data_bond_list, data_angles, data_dihedrals, data_impropers, \
data_boundary, data_pbc, data_prefix_no_space, in_init, in_settings, \
in_prefix
from .ttree_matrix_stack import AffineTransform, MultiAffineStack, \
LinTransform, Matrix2Quaternion, MultQuat
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import *
from ttree_lex import *
from lttree_styles import *
from ttree_matrix_stack import *
# Python 2/3 compatibility shim: Python 3 has no "unicode" or "basestring",
# so alias both names to "str" when "unicode" is undefined.
try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str
class LttreeSettings(BasicUISettings):
    """Settings for lttree.py.

    Extends ttree.py's BasicUISettings with bookkeeping describing the
    column layout of the "Data Atoms" section of a LAMMPS data file
    (which columns hold coordinates, direction vectors, atom-IDs, etc.).
    """

    def __init__(self,
                 user_bindings_x=None,
                 user_bindings=None,
                 order_method='by_command'):

        BasicUISettings.__init__(self,
                                 user_bindings_x,
                                 user_bindings,
                                 order_method)

        # The following new member data indicate which columns store
        # LAMMPS-specific information.
        # The next 6 members store keep track of the different columns
        # of the "Data Atoms" section of a LAMMPS data file:
        self.column_names = []  # <--A list of column names (optional)
        self.ii_coords = []  # <--A list of triplets of column indexes storing coordinate data
        self.ii_vects = []  # <--A list of triplets of column indexes storing directional data
        # (such as dipole or ellipsoid orientations)
        self.i_atomid = None  # <--An integer indicating which column has the atomid
        self.i_atomtype = None  # <--An integer indicating which column has the atomtype
        self.i_molid = None  # <--An integer indicating which column has the molid, if applicable
        self.print_full_atom_type_name_in_masses = False  # <--how to print atom type names in the "Masses" section of a DATA file?
def LttreeParseArgs(argv, settings, main=False, show_warnings=True):
    """
    Parse (and remove from "argv") the command line arguments which are
    specific to lttree.py, ie those not already consumed by ttree.py's
    BasicUIParseArgs().  Results are stored in "settings" (an
    LttreeSettings object).  If main==True the one remaining argument is
    interpreted as the name of the input file and opened
    (settings.lex.instream).  If no atom_style was specified,
    atom_style "full" is assumed (with a warning when show_warnings=True).
    Raises InputError when an argument is malformed or incomplete.
    """
    # By default, include force_fields provided with the package
    argv.extend(["-import-path",
                 pkg_resources.resource_filename(__name__, 'force_fields/')])

    BasicUIParseArgs(argv, settings)

    # Loop over the remaining arguments not processed yet.
    # These arguments are specific to the lttree.py program
    # and are not understood by ttree.py:
    i = 1
    while i < len(argv):
        #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
        if ((argv[i].lower() == '-atomstyle') or
            (argv[i].lower() == '-atom-style') or
                (argv[i].lower() == '-atom_style')):
            if i + 1 >= len(argv):
                raise InputError('Error(' + g_program_name + '): The ' + argv[i] + ' flag should be followed by a LAMMPS\n'
                                 ' atom_style name (or single quoted string containing a space-separated\n'
                                 ' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
            settings.column_names = AtomStyle2ColNames(argv[i + 1])
            sys.stderr.write('\n \"' + data_atoms + '\" column format:\n')
            sys.stderr.write(
                ' ' + (' '.join(settings.column_names)) + '\n\n')
            settings.ii_coords = ColNames2Coords(settings.column_names)
            settings.ii_vects = ColNames2Vects(settings.column_names)
            settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
                settings.column_names)
            del(argv[i:i + 2])

        elif (argv[i].lower() == '-icoord'):
            if i + 1 >= len(argv):
                raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
                                 ' corresponding to column numbers for coordinates in\n'
                                 ' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
            ilist = argv[i + 1].split()
            if (len(ilist) % 3) != 0:
                raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
                                 ' This is usually a list of 3 integers, but it can contain more.\n'
                                 ' The number of cooridnate columns must be divisible by 3,\n'
                                 ' (even if the simulation is in 2 dimensions)\n')
            # BUGFIX: previously the triplets were stored in an attribute
            # named "iaffinevects" which nothing else reads; store them in
            # settings.ii_coords (the attribute used by TransformAtomText).
            # Also use integer division (len(ilist)/3 is a float under
            # Python 3) and a fresh loop variable so the argv index "i"
            # is not clobbered by the inner loop.
            # NOTE(review): the "+ 1" offset applied to each user-supplied
            # column number is inherited from the original code; verify the
            # intended indexing convention against ColNames2Coords().
            settings.ii_coords = []
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * j]) + 1,
                        int(ilist[3 * j + 1]) + 1,
                        int(ilist[3 * j + 2]) + 1]
                settings.ii_coords.append(cols)
            del(argv[i:i + 2])

        elif (argv[i].lower() == '-ivect'):
            if i + 1 >= len(argv):
                raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
                                 ' corresponding to column numbers for direction vectors in\n'
                                 ' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
            ilist = argv[i + 1].split()
            if (len(ilist) % 3) != 0:
                raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
                                 ' This is usually a list of 3 integers, but it can contain more.\n'
                                 ' The number of cooridnate columns must be divisible by 3,\n'
                                 ' (even if the simulation is in 2 dimensions)\n')
            # BUGFIX: store into settings.ii_vects (previously stored in the
            # unused attribute "ivects"); integer division; fresh loop index.
            settings.ii_vects = []
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * j]) + 1,
                        int(ilist[3 * j + 1]) + 1,
                        int(ilist[3 * j + 2]) + 1]
                settings.ii_vects.append(cols)
            del(argv[i:i + 2])

        elif ((argv[i].lower() == '-iatomid') or
              (argv[i].lower() == '-iid') or
              (argv[i].lower() == '-iatom-id')):
            if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
                raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
                                 ' (>=1) indicating which column in the \"' +
                                 data_atoms + '\" section of a\n'
                                 ' LAMMPS data file contains the atom id number (typically 1).\n'
                                 ' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            # BUGFIX: previously assigned to a local variable which was
            # silently discarded; store the result in settings.
            settings.i_atomid = int(argv[i + 1]) - 1
            del(argv[i:i + 2])

        elif ((argv[i].lower() == '-iatomtype') or
              (argv[i].lower() == '-itype') or
              (argv[i].lower() == '-iatom-type')):
            if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
                raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
                                 ' (>=1) indicating which column in the \"' +
                                 data_atoms + '\" section of a\n'
                                 ' LAMMPS data file contains the atom type.\n'
                                 ' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            # BUGFIX: store in settings (was a discarded local variable).
            settings.i_atomtype = int(argv[i + 1]) - 1
            del(argv[i:i + 2])

        elif ((argv[i].lower() == '-imolid') or
              (argv[i].lower() == '-imol') or
              (argv[i].lower() == '-imol-id') or
              (argv[i].lower() == '-imoleculeid') or
              (argv[i].lower() == '-imolecule-id')):
            if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
                raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
                                 ' (>=1) indicating which column in the \"' +
                                 data_atoms + '\" section of a\n'
                                 ' LAMMPS data file contains the molecule id number.\n'
                                 ' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            # BUGFIX: store in settings (was a discarded local variable).
            settings.i_molid = int(argv[i + 1]) - 1
            del(argv[i:i + 2])

        elif (argv[i].lower() == '-full-comment-names'):
            settings.print_full_atom_type_name_in_masses = True
            del(argv[i:i + 1])
        elif (argv[i].lower() == '-short-comment-names'):
            settings.print_full_atom_type_name_in_masses = False
            del(argv[i:i + 1])

        elif (argv[i].find('-') == 0) and main:
            raise InputError('Error(' + g_program_name + '):\n'
                             'Unrecogized command line argument \"' + argv[i] + '\"\n')
        else:
            i += 1

    if main:
        # Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an openned file.
        # Assuming __name__ == "__main__", then the name of that file should
        # be the last remaining (unprocessed) argument in the argument list.
        # Otherwise, then name of that file will be determined later by the
        # python script which imports this module, so we let them handle it.)
        if len(argv) == 1:
            raise InputError('Error: This program requires at least one argument\n'
                             ' the name of a file containing ttree template commands\n')
        elif len(argv) == 2:
            try:
                # Parse text from the file named argv[1]
                settings.lex.infile = argv[1]
                settings.lex.instream = open(argv[1], 'r')
            except IOError:
                sys.stderr.write('Error: unable to open file\n'
                                 ' \"' + argv[1] + '\"\n'
                                 ' for reading.\n')
                sys.exit(1)
            del(argv[1:2])
        else:
            # if there are more than 2 remaining arguments,
            problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
            raise InputError('Syntax Error(' + g_program_name + '):\n\n'
                             ' Problem with argument list.\n'
                             ' The remaining arguments are:\n\n'
                             ' ' + (' '.join(problem_args)) + '\n\n'
                             ' (The actual problem may be earlier in the argument list.\n'
                             ' If these arguments are source files, then keep in mind\n'
                             ' that this program can not parse multiple source files.)\n'
                             ' Check the syntax of the entire argument list.\n')

    if len(settings.ii_coords) == 0 and show_warnings:
        sys.stderr.write('########################################################\n'
                         '## WARNING: atom_style unspecified ##\n'
                         '## --> \"' + data_atoms + '\" column data has an unknown format ##\n'
                         '## Assuming atom_style = \"full\" ##\n'
                         '########################################################\n')
        # The default atom_style is "full"
        settings.column_names = AtomStyle2ColNames('full')
        settings.ii_coords = ColNames2Coords(settings.column_names)
        settings.ii_vects = ColNames2Vects(settings.column_names)
        settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
            settings.column_names)

    return
def TransformAtomText(text, matrix, settings):
    """ Apply transformations to the coordinates and other vector degrees
    of freedom stored in the \"Data Atoms\" section of a LAMMPS data file.
    This is the \"text\" argument, which may be either a single string, or
    (generalization) a list of strings, in which case each string is
    transformed independently and a list is returned.  (The two are
    equivalent because the transformation is applied line-by-line; callers
    store rendered "Data Atoms" chunks as lists of strings.)
    The \"matrix\" argument stores the aggregate sum of combined
    transformations to be applied (an affine transform: rotation/linear
    part plus translation).
    Returns the transformed text (same type as the "text" argument).
    Raises InputError if the column count does not match
    settings.column_names.
    """
    if isinstance(text, (list, tuple)):
        # Accept a list of rendered text chunks and transform each one.
        return [TransformAtomText(t, matrix, settings) for t in text]

    #sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')

    lines = text.split('\n')

    for i in range(0, len(lines)):
        line_orig = lines[i]
        # Separate any trailing '#' comment from the data columns.
        ic = line_orig.find('#')
        if ic != -1:
            line = line_orig[:ic]
            comment = ' ' + line_orig[ic:].rstrip('\n')
        else:
            line = line_orig.rstrip('\n')
            comment = ''

        # Split the line into words (columns) using whitespace delimeters
        columns = SplitQuotedString(line,
                                    quotes='{',
                                    endquote='}')
        if len(columns) > 0:
            if len(columns) == len(settings.column_names) + 3:
                raise InputError('Error: lttree.py does not yet support integer unit-cell counters \n'
                                 ' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
                                 ' Instead please add the appropriate offsets (these offsets\n'
                                 ' should be multiples of the cell size) to the atom coordinates\n'
                                 ' in the data file, and eliminate the extra columns. Then try again.\n'
                                 ' (If you get this message often, email me and I\'ll fix this limitation.)')
            if len(columns) < len(settings.column_names):
                raise InputError('Error: The number of columns in your data file does not\n'
                                 ' match the LAMMPS atom_style you selected.\n'
                                 ' Use the -atomstyle <style> command line argument.\n'
                                 ' (Alternatively this error can be caused by a missing } character.)\n')
            x0 = [0.0, 0.0, 0.0]
            x = [0.0, 0.0, 0.0]
            # Atomic coordinates transform using "affine" transformations
            # (translations plus rotations [or other linear transformations])
            for cxcycz in settings.ii_coords:
                for d in range(0, 3):
                    x0[d] = float(columns[cxcycz[d]])
                AffineTransform(x, matrix, x0)  # x = matrix * x0 + b
                for d in range(0, 3):           # ("b" is part of "matrix")
                    columns[cxcycz[d]] = str(x[d])
            # Dipole moments and other direction-vectors
            # are not effected by translational movement
            for cxcycz in settings.ii_vects:
                for d in range(0, 3):
                    x0[d] = float(columns[cxcycz[d]])
                LinTransform(x, matrix, x0)  # x = matrix * x0
                for d in range(0, 3):
                    columns[cxcycz[d]] = str(x[d])
            lines[i] = ' '.join(columns) + comment
    return '\n'.join(lines)
def TransformEllipsoidText(text, matrix, settings):
    """ Apply the rotational part of the aggregate transformation "matrix"
    to the quaternions represented by the last four numbers on each line
    (of the "Ellipsoids" section of a LAMMPS data file).
    "text" may be either a single string, or (generalization) a list of
    strings, in which case each chunk is transformed independently and a
    list is returned (the transformation is applied line-by-line).
    The rotational part of "matrix" is converted to a quaternion and
    multiplied onto each line's quaternion.
    Raises InputError if a line does not have exactly 8 columns
    (ellipsoid-ID, 3 shape parameters, 4 quaternion components).
    """
    if isinstance(text, (list, tuple)):
        # Accept a list of rendered text chunks and transform each one.
        return [TransformEllipsoidText(t, matrix, settings) for t in text]

    #sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')

    lines = text.split('\n')

    for i in range(0, len(lines)):
        line_orig = lines[i]
        # Separate any trailing '#' comment from the data columns.
        ic = line_orig.find('#')
        if ic != -1:
            line = line_orig[:ic]
            comment = ' ' + line_orig[ic:].rstrip('\n')
        else:
            line = line_orig.rstrip('\n')
            comment = ''

        # Split the line into words (columns) using whitespace delimeters
        columns = SplitQuotedString(line,
                                    quotes='{',
                                    endquote='}')
        if len(columns) != 0:
            if len(columns) != 8:
                # BUGFIX: the message used to claim "7 numbers" although the
                # code requires 8 columns per line.
                raise InputError('Error (lttree.py): Expected 8 numbers'
                                 + ' instead of '
                                 + str(len(columns))
                                 + '\nline:\n'
                                 + line
                                 + '\nin each line of the \"Ellipsoids\" section.\n')
            q_orig = [float(columns[-4]),
                      float(columns[-3]),
                      float(columns[-2]),
                      float(columns[-1])]
            # Convert the rotational part of "matrix" into a quaternion,
            # then compose it with the line's original orientation.
            qRot = [0.0, 0.0, 0.0, 0.0]
            Matrix2Quaternion(matrix, qRot)
            q_new = [0.0, 0.0, 0.0, 0.0]
            MultQuat(q_new, qRot, q_orig)
            columns[-4] = str(q_new[0])
            columns[-3] = str(q_new[1])
            columns[-2] = str(q_new[2])
            columns[-1] = str(q_new[3])
            lines[i] = ' '.join(columns) + comment
    return '\n'.join(lines)
def CalcCM(text_Atoms,
           text_Masses=None,
           settings=None):
    """
    Compute the center of mass of the atoms listed in the "Data Atoms"
    section of a LAMMPS data file ("text_Atoms": either a single string,
    or a list of strings which are joined).
    If "text_Masses" (the contents of the "Masses" section, string or list
    of strings) is supplied, each atom is weighted by the mass of its atom
    type; otherwise every atom is given a mass of 1.
    Returns a list of 3 floats [x, y, z] ([0,0,0] if no atoms are found).
    Raises InputError if an atom's type has no mass defined, or if the
    column format does not match settings.column_names.

    BUGFIX: the previous version referenced several undefined names
    ("atomid2mass", "atomtype2mass", "atomids2masses", "tot_m", "tot_x",
    "atomid"), tested membership in the wrong container, and did not
    weight coordinates by mass, so it could never run successfully.
    """
    # Accept either a single string or a list of rendered text chunks
    # (files_content[...] values are stored as lists of strings).
    if isinstance(text_Atoms, (list, tuple)):
        text_Atoms = ''.join(text_Atoms)
    if isinstance(text_Masses, (list, tuple)):
        text_Masses = ''.join(text_Masses)

    # Loop through the "Masses" section: what is the mass of each atom type?
    types2masses = None
    if text_Masses is not None:
        types2masses = {}
        for line in text_Masses.split('\n'):
            # Split the line into words (columns) using whitespace delimeters
            columns = SplitQuotedString(line,
                                        quotes='{',
                                        endquote='}')
            if len(columns) == 2:
                types2masses[columns[0]] = float(columns[1])

    lines = text_Atoms.split('\n')

    # Pass 1 through the "Data Atoms" section: Determine each atom's mass
    atomid2mass = {}
    if types2masses is not None:
        assert(settings is not None)
        for line in lines:
            columns = SplitQuotedString(line,
                                        quotes='{',
                                        endquote='}')
            if len(columns) == 0:
                continue  # skip blank lines
            atomid = columns[settings.i_atomid]
            atomtype = columns[settings.i_atomtype]
            if atomtype not in types2masses:
                raise InputError('Error(lttree): You have neglected to define the mass of atom type: \"' + atomtype + '\"\n'
                                 'Did you specify the mass of every atom type using write(\"Masses\"){}?')
            atomid2mass[atomid] = types2masses[atomtype]

    # Pass 2 through the "Data Atoms" section: Find the center of mass.
    tot_m = 0.0
    tot_x = [0.0, 0.0, 0.0]
    for line in lines:
        columns = SplitQuotedString(line,
                                    quotes='{',
                                    endquote='}')
        if len(columns) > 0:
            if len(columns) == len(settings.column_names) + 3:
                raise InputError('Error: lttree.py does not yet support integer unit-cell counters (ix, iy, iz)\n'
                                 ' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
                                 ' Instead please add the appropriate offsets (these offsets\n'
                                 ' should be multiples of the cell size) to the atom coordinates\n'
                                 ' in the data file, and eliminate the extra columns. Then try again.\n'
                                 ' (If you get this message often, email me and I\'ll fix this limitation.)')
            if len(columns) != len(settings.column_names):
                raise InputError('Error: The number of columns in your data file does not\n'
                                 ' match the LAMMPS atom_style you selected.\n'
                                 ' Use the -atomstyle <style> command line argument.\n')
            if atomid2mass:
                m = atomid2mass[columns[settings.i_atomid]]
            else:
                m = 1.0
            tot_m += m
            # Accumulate the mass-weighted coordinate sum.
            for cxcycz in settings.ii_coords:
                for d in range(0, 3):
                    tot_x[d] += m * float(columns[cxcycz[d]])
            # Note: dipole moments and other direction vectors don't effect
            # the center of mass, so they are ignored here.

    xcm = [0.0, 0.0, 0.0]
    if tot_m > 0.0:
        for d in range(0, 3):
            xcm[d] = tot_x[d] / tot_m
    return xcm
def AddAtomTypeComments(tmpl_list, substitute_vars, print_full_atom_type_names):
    """
    This ugly code attempts to parse the text in the "Masses" section
    of a LAMMPS DATA file, and append comments to the end of every line
    defining the atom type. Each comment will contain a string which stores
    the name of the @atom-style variable (excluding the "@atom:" prefix).
    This is unfortunately complicated and messy because we have to do
    this before we render the text. (IE before we substutite numeric
    values into the variables. Once we've rendered the text,
    the variable names are discarded.)
    Therefore we have to work with a messy "tmpl_list" object
    which contains the text in a pre-rendered form. The "tmpl_list" object
    is a list of alternating TextBlocks and VarRef objects.
    This function rebuilds this tmpl_list object, splitting it into separate
    lines (which it currently is not) and then adding comments to the end
    of each line (if there isn't one there already). Finally it renders
    the resulting template and returns that text to the caller.
    """
    # Split the template into a table: one row per line, one entry per
    # whitespace-separated token (whitespace separators are discarded).
    table = TableFromTemplate(tmpl_list,
                              [[' ', '\t', '\r'], '\n'],
                              [True, True])
    for i in range(0, len(table)):
        # "j" skips over a leading TextBlock (if any) so that table[i][j+0]
        # is expected to be the VarRef naming the atom type.
        j = 0
        if isinstance(table[i][0], TextBlock):
            j += 1
        assert(hasattr(table[i], '__len__'))
        syntax_err = False  # NOTE(review): assigned but never read
        if len(table[i]) == j+0:
            pass  # skip blank lines
        elif ((len(table[i]) > j+0) and
              isinstance(table[i][0], TextBlock) and
              (len(table[i][0].text) > 0) and
              (table[i][0].text == '#')):
            pass  # skip comment lines
        # NOTE(review): the next test is an "if", not an "elif" -- blank and
        # comment lines fall through and can reach the "raise" below;
        # verify this is the intended behavior.
        if ((len(table[i]) > j+1) and
            isinstance(table[i][j+0], VarRef) and
                isinstance(table[i][j+1], TextBlock)):
            var_ref = table[i][j+0]
            if print_full_atom_type_names:
                # NOTE(review): CanonicalDescrStr is only in scope via the
                # "from ttree import *" fallback; it is not listed in the
                # package-relative import -- confirm it resolves when
                # lttree is imported as a package module.
                var_name = var_ref.prefix[0] + \
                    CanonicalDescrStr(var_ref.nptr.cat_name,
                                      var_ref.nptr.cat_node,
                                      var_ref.nptr.leaf_node,
                                      var_ref.srcloc)
            else:
                var_name = var_ref.nptr.leaf_node.name
            # remove the "@atom:" prefix before the variable name:
            if var_name.find('@atom:') == 0:
                var_name = var_name[6:]
            elif var_name.find('@/atom:') == 0:
                var_name = var_name[7:]
            new_comment = ' # ' + var_name
            if (len(table[i]) == j+2):
                # The line has no trailing text: append the comment.
                table[i].append(TextBlock(new_comment,
                                          table[i][j+1].srcloc))
            else:
                assert(len(table[i]) > j+2)
                assert(isinstance(table[i][j+2], TextBlock))
                # If this line doesn't already contain a comment, then add one
                if table[i][j+2].text.find('#') == -1:
                    table[i][j+2].text += new_comment
                else:
                    # Insert a space between 2nd column and the comment
                    table[i][j+2].text = ' '+table[i][j+2].text
                # Also add spaces between any words within the comments. This is
                # necessary because TableFromTemplate() removed all whitespace
                for k in range(j+3, len(table[i])):
                    table[i][k].text = ' '+table[i][k].text
            # We must insert a space between the first and second columns
            # because TableFromTemplate() removes this whitespace separator.
            table[i].insert(j+1, TextBlock(' ', table[i][j+1].srcloc))
        else:
            raise InputError('----------------------------------------------------\n' +
                             ' Syntax error near ' +
                             ErrorLeader(table[i][j+0].srcloc.infile,
                                         table[i][j+0].srcloc.lineno) + '\n'
                             ' The format is incorrect.\n')
        # Add a newline:
        table[i].append(TextBlock('\n',table[i][j+1].srcloc))

    # Now flatten the "table" (which is a list-of-lists)
    # into a simple 1-dimensional list
    # (of alternating VarRefs and TextBlocks, in this case)
    templ_list = [entry for sublist in table for entry in sublist]
    # Note: This is equivalent to
    # templ_list = []
    # for sublist in table:
    #     for entry in sublist:
    #         templ_list.append(entry)
    # When building list comprehensions with multiple "for" tokens,
    # the outer loop comes first (ie "for sublist in table")

    # Now render this text and return it to the caller:
    return Render(templ_list, substitute_vars)
def _ExecCommands(command_list,
                  index,
                  global_files_content,
                  settings,
                  matrix_stack,
                  current_scope_id=None,
                  substitute_vars=True):
    """
    _ExecCommands():
    The argument "command_list" is a list of "Command" data structures
    (defined in ttree.py).  Carry out the write() and write_once()
    commands (which write out the contents of the templates contained
    inside them).  Instead of writing files, their contents are saved
    as strings in "global_files_content", which should be of type
    defaultdict(list): an associative array whose key is a string
    (a filename) and whose value is a list of strings (of rendered
    templates).
    "matrix_stack" (a MultiAffineStack) accumulates coordinate
    transformations requested by push/pop commands; the aggregate
    transformation is applied to the "Data Atoms" and "Ellipsoids"
    sections as they are rendered.
    Processing starts at position "index" and stops at the matching
    ScopeEnd (or the end of the list); the index of the first
    unprocessed command is returned.
    """
    files_content = defaultdict(list)
    postprocessing_commands = []

    while index < len(command_list):
        command = command_list[index]
        index += 1

        # For debugging only: print unexpected command types to stderr.
        if ((not isinstance(command, StackableCommand)) and
            (not isinstance(command, ScopeCommand)) and
                (not isinstance(command, WriteFileCommand))):
            sys.stderr.write(str(command) + '\n')

        if isinstance(command, PopCommand):
            assert(current_scope_id != None)
            if command.context_node == None:
                command.context_node = current_scope_id
            if isinstance(command, PopRightCommand):
                matrix_stack.PopRight(which_stack=command.context_node)
            elif isinstance(command, PopLeftCommand):
                matrix_stack.PopLeft(which_stack=command.context_node)
            else:
                assert(False)

        elif isinstance(command, PushCommand):
            assert(current_scope_id != None)
            if command.context_node == None:
                command.context_node = current_scope_id
            # Some commands are post-processing commands, and must be
            # carried out AFTER all the text has been rendered.  For example
            # the "movecm(0,0,0)" waits until all of the coordinates have
            # been rendered, calculates the center-of-mass, and then applies
            # a translation moving the center of mass to the origin (0,0,0).
            # We need to figure out which of these commands need to be
            # postponed, and which commands can be carried out now
            # ("now" = pushing transformation matrices onto the matrix stack).
            transform_list = command.contents.split('.')
            transform_blocks = []
            i_post_process = -1

            # Example: Suppose:
            # command.contents = '.rot(30,0,0,1).movecm(0,0,0).rot(45,1,0,0).scalecm(2.0).move(-2,1,0)'
            # then
            # transform_list = ['rot(30,0,0,1)', 'movecm(0,0,0)', 'rot(45,1,0,0)', 'scalecm(2.0)', 'move(-2,1,0)']
            # The first command 'rot(30,0,0,1)' is carried out now; the
            # remaining commands are carried out during post-processing
            # (when processing the "ScopeEnd" command).
            # We break up the commands into "blocks" separated by center-
            # of-mass transformations ('movecm', 'rotcm', or 'scalecm'):
            # transform_blocks = ['.rot(30,0,0,1)',
            #                     '.movecm(0,0,0).rot(45,1,0,0)',
            #                     '.scalecm(2.0).move(-2,1,0)']
            i = 0
            while i < len(transform_list):
                transform_block = ''
                while i < len(transform_list):
                    transform = transform_list[i]
                    i += 1
                    if transform != '':
                        transform_block += '.' + transform
                        transform = transform.split('(')[0]
                        if ((transform == 'movecm') or
                            (transform == 'rotcm') or
                                (transform == 'scalecm')):
                            break
                transform_blocks.append(transform_block)

            if len(postprocessing_commands) == 0:
                # The first block (before movecm, rotcm, or scalecm)
                # can be executed now by modifying the matrix stack.
                if isinstance(command, PushRightCommand):
                    matrix_stack.PushCommandsRight(transform_blocks[0].strip('.'),
                                                   command.srcloc,
                                                   which_stack=command.context_node)
                elif isinstance(command, PushLeftCommand):
                    matrix_stack.PushCommandsLeft(transform_blocks[0].strip('.'),
                                                  command.srcloc,
                                                  which_stack=command.context_node)
                # Everything else must be saved for later.
                postprocessing_blocks = transform_blocks[1:]
            else:
                # If we already encountered a "movecm" "rotcm" or "scalecm"
                # then all of the command blocks must be handled during
                # postprocessing.
                postprocessing_blocks = transform_blocks

            for transform_block in postprocessing_blocks:
                # BUGFIX: this assertion used to test the undefined name
                # "block", raising NameError whenever any postprocessing
                # block existed.
                assert(isinstance(transform_block, basestring))
                if isinstance(command, PushRightCommand):
                    postprocessing_commands.append(PushRightCommand(transform_block,
                                                                    command.srcloc,
                                                                    command.context_node))
                elif isinstance(command, PushLeftCommand):
                    postprocessing_commands.append(PushLeftCommand(transform_block,
                                                                   command.srcloc,
                                                                   command.context_node))

        elif isinstance(command, WriteFileCommand):
            # --- Throw away lines containing references to deleted variables ---
            # First: To edit the content of a template,
            # you need to make a deep local copy of it
            tmpl_list = []
            for entry in command.tmpl_list:
                if isinstance(entry, TextBlock):
                    tmpl_list.append(TextBlock(entry.text,
                                               entry.srcloc))  # , entry.srcloc_end))
                else:
                    tmpl_list.append(entry)

            # Now throw away lines with deleted variables
            DeleteLinesWithBadVars(tmpl_list)

            # --- Now render the text ---
            text = Render(tmpl_list,
                          substitute_vars)

            # ---- Coordinates of the atoms must be rotated and translated
            # after rendering.  In addition, other vectors (dipoles,
            # ellipsoid orientations) must be processed.  This requires us
            # to re-parse the contents of this text (after it has been
            # rendered), and apply these transformations before passing
            # them on to the caller.
            if command.filename == data_atoms:
                text = TransformAtomText(text, matrix_stack.M, settings)
            elif command.filename == data_ellipsoids:
                text = TransformEllipsoidText(text, matrix_stack.M, settings)
            if command.filename == data_masses:
                text = AddAtomTypeComments(tmpl_list,
                                           substitute_vars,
                                           settings.print_full_atom_type_name_in_masses)

            files_content[command.filename].append(text)

        elif isinstance(command, ScopeBegin):
            if isinstance(command.node, InstanceObj):
                if ((command.node.children != None) and
                        (len(command.node.children) > 0)):
                    matrix_stack.PushStack(command.node)

            # ScopeBegin and ScopeEnd are (usually) used to demarcate/enclose
            # the commands which are issued for a single class or class
            # instance.  If we reach a ScopeBegin, recursively process the
            # commands belonging to the child.
            index = _ExecCommands(command_list,
                                  index,
                                  files_content,
                                  settings,
                                  matrix_stack,
                                  command.node,
                                  substitute_vars)

        elif isinstance(command, ScopeEnd):
            if data_atoms in files_content:
                # NOTE(review): files_content[...] values are *lists* of
                # rendered strings here; CalcCM(), TransformAtomText() and
                # TransformEllipsoidText() must accept list input as well
                # as a single string -- verify when modifying them.
                for ppcommand in postprocessing_commands:
                    if data_masses in files_content:
                        xcm = CalcCM(files_content[data_atoms],
                                     files_content[data_masses],
                                     settings)
                    else:
                        xcm = CalcCM(files_content[data_atoms])
                    if isinstance(ppcommand, PushRightCommand):
                        matrix_stack.PushCommandsRight(ppcommand.contents,
                                                       ppcommand.srcloc,
                                                       xcm,
                                                       which_stack=command.context_node)
                    elif isinstance(ppcommand, PushLeftCommand):
                        matrix_stack.PushCommandsLeft(ppcommand.contents,
                                                      ppcommand.srcloc,
                                                      xcm,
                                                      which_stack=command.context_node)
                    files_content[data_atoms] = \
                        TransformAtomText(files_content[data_atoms],
                                          matrix_stack.M, settings)
                    files_content[data_ellipsoids] = \
                        TransformEllipsoidText(files_content[data_ellipsoids],
                                               matrix_stack.M, settings)

                for ppcommand in postprocessing_commands:
                    matrix_stack.Pop(which_stack=command.context_node)
                    # (same as PopRight())

            if isinstance(command.node, InstanceObj):
                if ((command.node.children != None) and
                        (len(command.node.children) > 0)):
                    matrix_stack.PopStack()

            # "ScopeEnd" means we're done with this class/instance.
            break

        else:
            assert(False)
            # no other command types allowed at this point

    # After processing the commands in this list,
    # merge the templates with the caller's template list
    for filename, tmpl_list in files_content.items():
        global_files_content[filename] += \
            files_content[filename]

    return index
def ExecCommands(commands,
                 files_content,
                 settings,
                 substitute_vars=True):
    """Execute every command in "commands", appending the rendered template
    text to "files_content" (a defaultdict mapping file names to lists of
    strings).  A fresh MultiAffineStack is used to accumulate coordinate
    transformations during execution."""
    stack_of_transforms = MultiAffineStack()
    num_processed = _ExecCommands(commands,
                                  0,
                                  files_content,
                                  settings,
                                  stack_of_transforms,
                                  None,
                                  substitute_vars)
    # Every command in the list should have been consumed.
    assert num_processed == len(commands)
def WriteFiles(files_content, suffix='', write_to_stdout=True):
    """Write each entry of "files_content" (a mapping from file names to
    lists of strings) to the file named by its key plus "suffix",
    *appending* to any existing file.  An entry whose key is '' is written
    to stdout instead (only when write_to_stdout is True), and an entry
    whose key is None is skipped entirely."""
    for filename, str_list in files_content.items():
        if filename is None:
            continue  # placeholder entries are ignored
        text = ''.join(str_list)
        if filename == '':
            # The empty name designates standard output (never closed).
            if write_to_stdout:
                sys.stdout.write(text)
        else:
            with open(filename + suffix, 'a') as out_file:
                out_file.write(text)
    return
def main():
    """
    This is a "main module" wrapper for invoking lttree.py
    as a stand alone program. This program:
    1)reads a ttree file,
    2)constructs a tree of class definitions (g_objectdefs)
    3)constructs a tree of instantiated class objects (g_objects),
    4)automatically assigns values to the variables,
    5)and carries out the "write" commands to write the templates a file(s).
    """
    ####### Main Code Below: #######
    sys.stderr.write(g_program_name + ' v' +
                     g_version_str + ' ' + g_date_str + ' ')
    sys.stderr.write('\n(python version ' + str(sys.version) + ')\n')
    if sys.version < '2.6':
        raise InputError(
            'Error: Alas, you must upgrade to a newer version of python.')

    try:
        #settings = BasicUISettings()
        #BasicUIParseArgs(sys.argv, settings)
        settings = LttreeSettings()
        # Parse a copy of sys.argv so the original list is left untouched.
        LttreeParseArgs([arg for arg in sys.argv], #(deep copy of sys.argv)
                        settings, main=True, show_warnings=True)

        # Data structures to store the class definitionss and instances
        g_objectdefs = StaticObj('', None)  # The root of the static tree
        # has name '' (equivalent to '/')
        g_objects = InstanceObj('', None)  # The root of the instance tree
        # has name '' (equivalent to '/')

        # A list of commands to carry out
        g_static_commands = []
        g_instance_commands = []

        # Lex/parse the input file, build both trees, assign variable
        # values, and collect the static and per-instance commands.
        BasicUI(settings,
                g_objectdefs,
                g_objects,
                g_static_commands,
                g_instance_commands)

        # Interpret the the commands. (These are typically write() or
        # write_once() commands, rendering templates into text.
        # This step also handles coordinate transformations and delete commands.
        # Coordinate transformations can be applied to the rendered text
        # as a post-processing step.
        sys.stderr.write(' done\nbuilding templates...')

        files_content = defaultdict(list)

        ExecCommands(g_static_commands,
                     files_content,
                     settings,
                     False)
        ExecCommands(g_instance_commands,
                     files_content,
                     settings,
                     False)

        # Finally: write the rendered text to actual files.

        # Erase the files that will be written to:
        sys.stderr.write(' done\nwriting templates...')
        EraseTemplateFiles(g_static_commands)
        EraseTemplateFiles(g_instance_commands)

        # Write the files as templates
        # (with the original variable names present)
        WriteFiles(files_content, suffix=".template", write_to_stdout=False)

        # Write the files with the variables substituted by values
        # (note: the commands are re-executed, this time with
        #  substitute_vars=True)
        sys.stderr.write(' done\nbuilding and rendering templates...')
        files_content = defaultdict(list)
        ExecCommands(g_static_commands, files_content, settings, True)
        ExecCommands(g_instance_commands, files_content, settings, True)
        sys.stderr.write(' done\nwriting rendered templates...\n')
        WriteFiles(files_content)
        sys.stderr.write(' done\n')

        # Now write the variable bindings/assignments table.
        sys.stderr.write('writing \"ttree_assignments.txt\" file...')
        # <-- erase previous version.
        open('ttree_assignments.txt', 'w').close()
        WriteVarBindingsFile(g_objectdefs)
        WriteVarBindingsFile(g_objects)
        sys.stderr.write(' done\n')

    except (ValueError, InputError) as err:
        # ValueError typically means a numeric conversion failed somewhere
        # while rendering (often because no atom_style was specified).
        if isinstance(err, ValueError):
            sys.stderr.write('Error converting string to numeric format.\n'
                             ' This sometimes means you have forgotten to specify the atom style\n'
                             ' (using the \"-atomstyle\" command). Alternatively it could indicate\n'
                             ' that the moltemplate file contains non-numeric text in one of the\n'
                             ' .move(), .rot(), .scale(), .matrix(), or .quat() commands. If neither of\n'
                             ' these scenarios apply, please report this bug. (jewett.aij at gmail.com)\n')
            sys.exit(-1)
        else:
            sys.stderr.write('\n\n' + str(err) + '\n')
            sys.exit(-1)
    return
# Allow lttree.py to be run as a stand-alone program (as well as a module).
if __name__ == '__main__':
    main()
| mit | 4bb24ce2f97575b30e3b02999642bdde | 47.046891 | 130 | 0.51046 | 4.328588 | false | false | false | false |
jewettaij/moltemplate | moltemplate/force_fields/cooke_deserno_supporting_files/gen_potential-cooke.py | 2 | 4380 | #!/usr/bin/python
import math
import os
import sys
from fractions import Fraction

from numpy import *
### PARAMETERS ###
# Cooke/Deserno 3-bead lipid model parameters.
# (presumably lengths are in units of sigma and energies in units of
#  epsilon -- TODO confirm against the model's reference)
sigma = 1.00
epsilon = 1.00

b_hh = 0.95 * sigma   # head-head interaction diameter
b_ht = 0.95 * sigma   # head-tail interaction diameter
b_tt = 1.00 * sigma   # tail-tail interaction diameter

r_init = 0.000001     # smallest tabulated separation (avoids r=0 singularity)
r_max = sigma * 3.    # largest tabulated separation
r_space = 0.01        # spacing between tabulated entries
##################
### INPUTS ###
# Optional command line argument: the tail attraction cutoff width w_cut.
if len(sys.argv) == 2:
    w_cut = float(sys.argv[1])
else:
    w_cut = 1.6
    # 1.6 seems to be 'good' for vesicles, bilayers 1.4
##############
def WCA_energy(b, r):
    """Return the purely-repulsive WCA (shifted, truncated Lennard-Jones)
    energy at separation r for interaction diameter b, using the
    module-level well depth "epsilon":
        4*eps*((b/r)^12 - (b/r)^6 + 1/4)
    (zero at the r = 2^(1/6)*b cutoff).
    BUGFIX: implemented with the ** operator instead of math.pow(), since
    "math" was never imported by this script (it was only reachable
    through the deprecated numpy.math re-export)."""
    sr6 = (b / r) ** 6        # (b/r)^6; squared below gives (b/r)^12
    return 4 * epsilon * (sr6 * sr6 - sr6 + 0.25)
def WCA_forces(b, r):
    """Return the WCA force column for the table, using the same sign
    convention as the original script:
        -(24*b^6/r^7 - 48*b^12/r^13)
    BUGFIX: implemented with the ** operator instead of math.pow(), since
    "math" was never imported by this script."""
    b6 = b ** 6
    val1 = 24 * b6 / r ** 7
    val2 = -48 * b6 * b6 / r ** 13
    return -(val1 + val2)
def Tail_energy(b, r, r_cut):
    """Attractive cosine-squared tail of the Cooke/Deserno model:
    a flat well of depth epsilon for r < r_cut, decaying smoothly to
    zero over the width w_cut (module-level globals epsilon, w_cut)."""
    if r < r_cut:
        return -1 * epsilon
    cos_val = math.cos((math.pi * (r - r_cut)) / (2 * w_cut))
    return -1 * epsilon * cos_val ** 2
def Tail_forces(b, r, r_cut):
Force = 0
if (r < r_cut):
Force = 0;
else:
val1 = math.sin((math.pi * (r - r_cut)) / w_cut)
Force = -math.pi * val1 / (2 * w_cut)
return Force
##############
ofile = open('tabulated_potential.dat', 'w')
tot_potential_hh = zeros((int(r_max / r_space) + 1, 4))
tot_potential_ht = zeros((int(r_max / r_space) + 1, 4))
tot_potential_tt = zeros((int(r_max / r_space) + 1, 4))
# Setup up formatting & distances in all arrays
for i in range(int(r_max / r_space)+1):
tot_potential_hh[:,0][i] = i+1
tot_potential_ht[:,0][i] = i+1
tot_potential_tt[:,0][i] = i+1
for i in range(1, int(r_max / r_space)+1):
tot_potential_hh[:,1][i] = tot_potential_hh[:,1][i-1] + r_space
tot_potential_ht[:,1][i] = tot_potential_ht[:,1][i-1] + r_space
tot_potential_tt[:,1][i] = tot_potential_tt[:,1][i-1] + r_space
tot_potential_hh[:,1][0] = r_init
tot_potential_ht[:,1][0] = r_init
tot_potential_tt[:,1][0] = r_init
ofile.write("# Tabulated potential for Cooke 3-bead lipid model, Wc = %f\n\n" % w_cut)
num = len(tot_potential_hh[:,0])
### Calcaulte first potential, H-H
ofile.write("HEAD_HEAD\n")
r_cut = 2**Fraction('1/6') * b_hh
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_hh[:,1][0], tot_potential_hh[:,2][0], tot_potential_hh[:,3][0]))
for i in range(1, rmax+1):
tot_potential_hh[:,2][i] = WCA_energy(b_hh, tot_potential_hh[:,1][i])
tot_potential_hh[:,3][i] = WCA_forces(b_hh, tot_potential_hh[:,1][i])
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_hh[:,1][i], tot_potential_hh[:,2][i], tot_potential_hh[:,3][i]))
ofile.write("\n")
### Calcaulte second potential, H-T
ofile.write("HEAD_TAIL\n")
r_cut = 2**Fraction('1/6') * b_ht
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_ht[:,1][0], tot_potential_ht[:,2][0], tot_potential_ht[:,3][0]))
for i in range(1, rmax+1):
tot_potential_ht[:,2][i] = WCA_energy(b_ht, tot_potential_ht[:,1][i])
tot_potential_ht[:,3][i] = WCA_forces(b_ht, tot_potential_ht[:,1][i])
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_ht[:,1][i], tot_potential_ht[:,2][i], tot_potential_ht[:,3][i]))
ofile.write("\n")
### Calcaulte third potential, T-T
# Also include extra tail-tail attraction term
ofile.write("TAIL_TAIL\n")
r_cut = 2**Fraction('1/6') * b_tt
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_tt[:,1][0], tot_potential_tt[:,2][0], tot_potential_tt[:,3][0]))
for i in range(1, rmax+1):
tot_potential_tt[:,2][i] = WCA_energy(b_tt, tot_potential_tt[:,1][i])
tot_potential_tt[:,3][i] = WCA_forces(b_tt, tot_potential_tt[:,1][i])
max2 = int( (r_cut + w_cut) / r_space)
for i in range(1, max2+1):
tot_potential_tt[:,2][i] = tot_potential_tt[:,2][i] + Tail_energy(b_tt, tot_potential_tt[:,1][i], r_cut)
tot_potential_tt[:,3][i] = tot_potential_tt[:,3][i] + Tail_forces(b_tt, tot_potential_tt[:,1][i], r_cut)
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_tt[:,1][i], tot_potential_tt[:,2][i], tot_potential_tt[:,3][i]))
ofile.write("\n")
sys.exit()
| mit | 310626e372107b2b487c2f448b31851e | 29.416667 | 116 | 0.585616 | 2.302839 | false | false | false | false |
psf/black | src/blackd/middlewares.py | 1 | 1585 | from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar
from aiohttp.web_request import Request
from aiohttp.web_response import StreamResponse
if TYPE_CHECKING:
F = TypeVar("F", bound=Callable[..., Any])
middleware: Callable[[F], F]
else:
try:
from aiohttp.web_middlewares import middleware
except ImportError:
# @middleware is deprecated and its behaviour is the default since aiohttp 4.0
# so if it doesn't exist anymore, define a no-op for forward compatibility.
middleware = lambda x: x # noqa: E731
Handler = Callable[[Request], Awaitable[StreamResponse]]
Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
def cors(allow_headers: Iterable[str]) -> Middleware:
@middleware
async def impl(request: Request, handler: Handler) -> StreamResponse:
is_options = request.method == "OPTIONS"
is_preflight = is_options and "Access-Control-Request-Method" in request.headers
if is_preflight:
resp = StreamResponse()
else:
resp = await handler(request)
origin = request.headers.get("Origin")
if not origin:
return resp
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["Access-Control-Expose-Headers"] = "*"
if is_options:
resp.headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
resp.headers["Access-Control-Allow-Methods"] = ", ".join(
("OPTIONS", "POST")
)
return resp
return impl
| mit | e825426b52ac7a0f75c167a7a4e87e9d | 34.222222 | 88 | 0.644795 | 4.149215 | false | false | false | false |
psf/black | tests/data/simple_cases/comments4.py | 2 | 3531 | from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
class C:
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"None is an invalid value for Metadata-Version. Error: This field is"
" required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata"
" Version see"
" https://packaging.python.org/specifications/core-metadata",
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. Error: Must start and end with a"
" letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. Error: Must start and end with"
" a letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
],
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
pyramid_config.testing_securitypolicy(userid=1)
db_request.POST = MultiDict(post_data)
def foo(list_a, list_b):
results = (
User.query.filter(User.foo == "bar")
.filter( # Because foo.
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
# Another comment about the filtering on is_quux goes here.
.filter(db.not_(User.is_pending.astext.cast(db.Boolean).is_(True)))
.order_by(User.created_at.desc())
.with_for_update(key_share=True)
.all()
)
return results
def foo2(list_a, list_b):
# Standalone comment reasonably placed.
return (
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
def foo3(list_a, list_b):
return (
# Standlone comment but weirdly placed.
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
| mit | adf9a6d9237f3d2323982d6de8dba632 | 36.56383 | 88 | 0.543472 | 3.796774 | false | false | false | false |
psf/black | src/black/__init__.py | 1 | 45425 | import io
import json
import platform
import re
import sys
import tokenize
import traceback
from contextlib import contextmanager
from dataclasses import replace
from datetime import datetime
from enum import Enum
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import (
Any,
Dict,
Generator,
Iterator,
List,
MutableMapping,
Optional,
Pattern,
Sequence,
Set,
Sized,
Tuple,
Union,
)
import click
from click.core import ParameterSource
from mypy_extensions import mypyc_attr
from pathspec import PathSpec
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError
from _black_version import version as __version__
from black.cache import Cache, get_cache_info, read_cache, write_cache
from black.comments import normalize_fmt_off
from black.const import (
DEFAULT_EXCLUDES,
DEFAULT_INCLUDES,
DEFAULT_LINE_LENGTH,
STDIN_PLACEHOLDER,
)
from black.files import (
find_project_root,
find_pyproject_toml,
find_user_pyproject_toml,
gen_python_files,
get_gitignore,
normalize_path_maybe_ignore,
parse_pyproject_toml,
wrap_stream_for_windows,
)
from black.handle_ipynb_magics import (
PYTHON_CELL_MAGICS,
TRANSFORMED_MAGICS,
jupyter_dependencies_are_installed,
mask_cell,
put_trailing_semicolon_back,
remove_trailing_semicolon,
unmask_cell,
)
from black.linegen import LN, LineGenerator, transform_line
from black.lines import EmptyLineTracker, LinesBlock
from black.mode import (
FUTURE_FLAG_TO_FEATURE,
VERSION_TO_FEATURES,
Feature,
Mode,
TargetVersion,
supports_feature,
)
from black.nodes import (
STARS,
is_number_token,
is_simple_decorator_expression,
is_string_token,
syms,
)
from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out
from black.parsing import InvalidInput # noqa F401
from black.parsing import lib2to3_parse, parse_ast, stringify_ast
from black.report import Changed, NothingChanged, Report
from black.trans import iter_fexpr_spans
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
# True when running as a mypyc-compiled extension module (.pyd on Windows,
# .so elsewhere) rather than from plain .py source.
COMPILED = Path(__file__).suffix in (".pyd", ".so")

# Type aliases used throughout this module for readability.
FileContent = str  # complete text of a source file
Encoding = str  # text encoding name, e.g. "utf-8"
NewLine = str  # newline sequence, e.g. "\n" or "\r\n"
class WriteBack(Enum):
    """What to do with the formatting result.

    NO: discard it; YES: write files in place; DIFF: print a unified diff;
    CHECK: only report whether files would change; COLOR_DIFF: like DIFF but
    colorized.
    """

    NO = 0
    YES = 1
    DIFF = 2
    CHECK = 3
    COLOR_DIFF = 4

    @classmethod
    def from_configuration(
        cls, *, check: bool, diff: bool, color: bool = False
    ) -> "WriteBack":
        """Map the --check/--diff/--color CLI flags onto a WriteBack member.

        --check without --diff wins; without --diff the default is to write
        files back; with --diff, --color selects the colorized variant.
        """
        if check and not diff:
            return cls.CHECK
        if not diff:
            return cls.YES
        return cls.COLOR_DIFF if color else cls.DIFF
# Legacy name, left for integrations: external tools historically imported
# `black.FileMode`; it is now simply an alias of `black.mode.Mode`.
FileMode = Mode
def read_pyproject_toml(
    ctx: click.Context, param: click.Parameter, value: Optional[str]
) -> Optional[str]:
    """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.

    Click callback for the eager --config option: runs before other options
    are parsed so the file's values can act as their defaults.

    Returns the path to a successfully found and read configuration file, None
    otherwise.
    """
    if not value:
        # No explicit --config: discover pyproject.toml from the sources given.
        value = find_pyproject_toml(ctx.params.get("src", ()))
        if value is None:
            return None

    try:
        config = parse_pyproject_toml(value)
    except (OSError, ValueError) as e:
        raise click.FileError(
            filename=value, hint=f"Error reading configuration file: {e}"
        ) from None

    if not config:
        return None
    else:
        # Sanitize the values to be Click friendly. For more information please see:
        # https://github.com/psf/black/issues/1458
        # https://github.com/pallets/click/issues/1567
        config = {
            k: str(v) if not isinstance(v, (list, dict)) else v
            for k, v in config.items()
        }

    target_version = config.get("target_version")
    if target_version is not None and not isinstance(target_version, list):
        raise click.BadOptionUsage(
            "target-version", "Config key target-version must be a list"
        )

    # Merge into the existing default map so config values become option
    # defaults; explicit CLI flags still override them.
    default_map: Dict[str, Any] = {}
    if ctx.default_map:
        default_map.update(ctx.default_map)
    default_map.update(config)

    ctx.default_map = default_map
    return value
def target_version_option_callback(
    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
) -> List[TargetVersion]:
    """Compute the target versions from a --target-version flag.

    This is its own function because mypy couldn't infer the type correctly
    when it was a lambda, causing mypyc trouble.
    """
    versions: List[TargetVersion] = []
    for name in v:
        # CLI values are lowercase (e.g. "py38"); enum members are uppercase.
        versions.append(TargetVersion[name.upper()])
    return versions
def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
    """Compile a regular expression string in `regex`.

    A multi-line pattern is assumed to be written for readability, so it is
    compiled in verbose mode (whitespace-insensitive, comments allowed).
    """
    pattern_text = f"(?x){regex}" if "\n" in regex else regex
    compiled: Pattern[str] = re.compile(pattern_text)
    return compiled
def validate_regex(
    ctx: click.Context,
    param: click.Parameter,
    value: Optional[str],
) -> Optional[Pattern[str]]:
    """Click callback: compile `value` as a regex, passing None through.

    Raises click.BadParameter when the string is not a valid pattern.
    """
    if value is None:
        return None
    try:
        return re_compile_maybe_verbose(value)
    except re.error as e:
        raise click.BadParameter(f"Not a valid regular expression: {e}") from None
@click.command(
    context_settings={"help_option_names": ["-h", "--help"]},
    # While Click does set this field automatically using the docstring, mypyc
    # (annoyingly) strips 'em so we need to set it here too.
    help="The uncompromising code formatter.",
)
@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
@click.option(
    "-l",
    "--line-length",
    type=int,
    default=DEFAULT_LINE_LENGTH,
    help="How many characters per line to allow.",
    show_default=True,
)
@click.option(
    "-t",
    "--target-version",
    type=click.Choice([v.name.lower() for v in TargetVersion]),
    callback=target_version_option_callback,
    multiple=True,
    help=(
        "Python versions that should be supported by Black's output. [default: per-file"
        " auto-detection]"
    ),
)
# Input-type overrides (normally inferred from the file extension):
@click.option(
    "--pyi",
    is_flag=True,
    help=(
        "Format all input files like typing stubs regardless of file extension (useful"
        " when piping source on standard input)."
    ),
)
@click.option(
    "--ipynb",
    is_flag=True,
    help=(
        "Format all input files like Jupyter Notebooks regardless of file extension "
        "(useful when piping source on standard input)."
    ),
)
@click.option(
    "--python-cell-magics",
    multiple=True,
    help=(
        "When processing Jupyter Notebooks, add the given magic to the list"
        f" of known python-magics ({', '.join(PYTHON_CELL_MAGICS)})."
        " Useful for formatting cells with custom python magics."
    ),
    default=[],
)
# Style-tweaking flags:
@click.option(
    "-x",
    "--skip-source-first-line",
    is_flag=True,
    help="Skip the first line of the source code.",
)
@click.option(
    "-S",
    "--skip-string-normalization",
    is_flag=True,
    help="Don't normalize string quotes or prefixes.",
)
@click.option(
    "-C",
    "--skip-magic-trailing-comma",
    is_flag=True,
    help="Don't use trailing commas as a reason to split lines.",
)
@click.option(
    "--experimental-string-processing",
    is_flag=True,
    hidden=True,
    help="(DEPRECATED and now included in --preview) Normalize string literals.",
)
@click.option(
    "--preview",
    is_flag=True,
    help=(
        "Enable potentially disruptive style changes that may be added to Black's main"
        " functionality in the next major release."
    ),
)
# Output-mode flags (mapped onto WriteBack below):
@click.option(
    "--check",
    is_flag=True,
    help=(
        "Don't write the files back, just return the status. Return code 0 means"
        " nothing would change. Return code 1 means some files would be reformatted."
        " Return code 123 means there was an internal error."
    ),
)
@click.option(
    "--diff",
    is_flag=True,
    help="Don't write the files back, just output a diff for each file on stdout.",
)
@click.option(
    "--color/--no-color",
    is_flag=True,
    help="Show colored diff. Only applies when `--diff` is given.",
)
@click.option(
    "--fast/--safe",
    is_flag=True,
    help="If --fast given, skip temporary sanity checks. [default: --safe]",
)
@click.option(
    "--required-version",
    type=str,
    help=(
        "Require a specific version of Black to be running (useful for unifying results"
        " across many environments e.g. with a pyproject.toml file). It can be"
        " either a major version number or an exact version."
    ),
)
# File-discovery / filtering options:
@click.option(
    "--include",
    type=str,
    default=DEFAULT_INCLUDES,
    callback=validate_regex,
    help=(
        "A regular expression that matches files and directories that should be"
        " included on recursive searches. An empty value means all files are included"
        " regardless of the name. Use forward slashes for directories on all platforms"
        " (Windows, too). Exclusions are calculated first, inclusions later."
    ),
    show_default=True,
)
@click.option(
    "--exclude",
    type=str,
    callback=validate_regex,
    help=(
        "A regular expression that matches files and directories that should be"
        " excluded on recursive searches. An empty value means no paths are excluded."
        " Use forward slashes for directories on all platforms (Windows, too)."
        " Exclusions are calculated first, inclusions later. [default:"
        f" {DEFAULT_EXCLUDES}]"
    ),
    show_default=False,
)
@click.option(
    "--extend-exclude",
    type=str,
    callback=validate_regex,
    help=(
        "Like --exclude, but adds additional files and directories on top of the"
        " excluded ones. (Useful if you simply want to add to the default)"
    ),
)
@click.option(
    "--force-exclude",
    type=str,
    callback=validate_regex,
    help=(
        "Like --exclude, but files and directories matching this regex will be "
        "excluded even when they are passed explicitly as arguments."
    ),
)
@click.option(
    "--stdin-filename",
    type=str,
    help=(
        "The name of the file when passing it through stdin. Useful to make "
        "sure Black will respect --force-exclude option on some "
        "editors that rely on using stdin."
    ),
)
@click.option(
    "-W",
    "--workers",
    type=click.IntRange(min=1),
    default=None,
    help="Number of parallel workers [default: number of CPUs in the system]",
)
@click.option(
    "-q",
    "--quiet",
    is_flag=True,
    help=(
        "Don't emit non-error messages to stderr. Errors are still emitted; silence"
        " those with 2>/dev/null."
    ),
)
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help=(
        "Also emit messages to stderr about files that were not changed or were ignored"
        " due to exclusion patterns."
    ),
)
@click.version_option(
    version=__version__,
    message=(
        f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n"
        f"Python ({platform.python_implementation()}) {platform.python_version()}"
    ),
)
@click.argument(
    "src",
    nargs=-1,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
    ),
    is_eager=True,
    metavar="SRC ...",
)
# --config is eager so read_pyproject_toml can seed defaults for the options
# above before they are parsed.
@click.option(
    "--config",
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        allow_dash=False,
        path_type=str,
    ),
    is_eager=True,
    callback=read_pyproject_toml,
    help="Read configuration from FILE path.",
)
@click.pass_context
def main(  # noqa: C901
    ctx: click.Context,
    code: Optional[str],
    line_length: int,
    target_version: List[TargetVersion],
    check: bool,
    diff: bool,
    color: bool,
    fast: bool,
    pyi: bool,
    ipynb: bool,
    python_cell_magics: Sequence[str],
    skip_source_first_line: bool,
    skip_string_normalization: bool,
    skip_magic_trailing_comma: bool,
    experimental_string_processing: bool,
    preview: bool,
    quiet: bool,
    verbose: bool,
    required_version: Optional[str],
    include: Pattern[str],
    exclude: Optional[Pattern[str]],
    extend_exclude: Optional[Pattern[str]],
    force_exclude: Optional[Pattern[str]],
    stdin_filename: Optional[str],
    workers: Optional[int],
    src: Tuple[str, ...],
    config: Optional[str],
) -> None:
    """The uncompromising code formatter."""
    ctx.ensure_object(dict)

    # `-c CODE` and positional sources are mutually exclusive, and at least
    # one of them is required.
    if src and code is not None:
        out(
            main.get_usage(ctx)
            + "\n\n'SRC' and 'code' cannot be passed simultaneously."
        )
        ctx.exit(1)
    if not src and code is None:
        out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.")
        ctx.exit(1)

    # The project root anchors exclusion logic and relative-path handling; it
    # is irrelevant (None) when formatting an inline code string.
    root, method = (
        find_project_root(src, stdin_filename) if code is None else (None, None)
    )
    ctx.obj["root"] = root

    if verbose:
        if root:
            out(
                f"Identified `{root}` as project root containing a {method}.",
                fg="blue",
            )

        normalized = [
            (source, source)
            if source == "-"
            else (normalize_path_maybe_ignore(Path(source), root), source)
            for source in src
        ]
        srcs_string = ", ".join(
            [
                f'"{_norm}"'
                if _norm
                else f'\033[31m"{source} (skipping - invalid)"\033[34m'
                for _norm, source in normalized
            ]
        )
        out(f"Sources to be formatted: {srcs_string}", fg="blue")

        if config:
            config_source = ctx.get_parameter_source("config")
            user_level_config = str(find_user_pyproject_toml())
            if config == user_level_config:
                out(
                    (
                        "Using configuration from user-level config at "
                        f"'{user_level_config}'."
                    ),
                    fg="blue",
                )
            elif config_source in (
                ParameterSource.DEFAULT,
                ParameterSource.DEFAULT_MAP,
            ):
                out("Using configuration from project root.", fg="blue")
            else:
                out(f"Using configuration in '{config}'.", fg="blue")

    error_msg = "Oh no! 💥 💔 💥"
    # --required-version accepts either an exact version or just a major
    # version number (the first dotted component).
    if (
        required_version
        and required_version != __version__
        and required_version != __version__.split(".")[0]
    ):
        err(
            f"{error_msg} The required version `{required_version}` does not match"
            f" the running version `{__version__}`!"
        )
        ctx.exit(1)
    if ipynb and pyi:
        err("Cannot pass both `pyi` and `ipynb` flags!")
        ctx.exit(1)

    write_back = WriteBack.from_configuration(check=check, diff=diff, color=color)
    if target_version:
        versions = set(target_version)
    else:
        # We'll autodetect later.
        versions = set()
    # Bundle all style-affecting settings into a single Mode object that is
    # threaded through the formatting pipeline.
    mode = Mode(
        target_versions=versions,
        line_length=line_length,
        is_pyi=pyi,
        is_ipynb=ipynb,
        skip_source_first_line=skip_source_first_line,
        string_normalization=not skip_string_normalization,
        magic_trailing_comma=not skip_magic_trailing_comma,
        experimental_string_processing=experimental_string_processing,
        preview=preview,
        python_cell_magics=set(python_cell_magics),
    )

    if code is not None:
        # Run in quiet mode by default with -c; the extra output isn't useful.
        # You can still pass -v to get verbose output.
        quiet = True

    report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)

    if code is not None:
        reformat_code(
            content=code, fast=fast, write_back=write_back, mode=mode, report=report
        )
    else:
        try:
            sources = get_sources(
                ctx=ctx,
                src=src,
                quiet=quiet,
                verbose=verbose,
                include=include,
                exclude=exclude,
                extend_exclude=extend_exclude,
                force_exclude=force_exclude,
                report=report,
                stdin_filename=stdin_filename,
            )
        except GitWildMatchPatternError:
            # A malformed .gitignore pattern; the error was already reported.
            ctx.exit(1)

        path_empty(
            sources,
            "No Python files are present to be formatted. Nothing to do 😴",
            quiet,
            verbose,
            ctx,
        )

        # A single file is formatted in-process; multiple files fan out to
        # worker processes via black.concurrency (imported lazily).
        if len(sources) == 1:
            reformat_one(
                src=sources.pop(),
                fast=fast,
                write_back=write_back,
                mode=mode,
                report=report,
            )
        else:
            from black.concurrency import reformat_many

            reformat_many(
                sources=sources,
                fast=fast,
                write_back=write_back,
                mode=mode,
                report=report,
                workers=workers,
            )

    if verbose or not quiet:
        if code is None and (verbose or report.change_count or report.failure_count):
            out()
        out(error_msg if report.return_code else "All done! ✨ 🍰 ✨")
        if code is None:
            click.echo(str(report), err=True)

    ctx.exit(report.return_code)
def get_sources(
    *,
    ctx: click.Context,
    src: Tuple[str, ...],
    quiet: bool,
    verbose: bool,
    include: Pattern[str],
    exclude: Optional[Pattern[str]],
    extend_exclude: Optional[Pattern[str]],
    force_exclude: Optional[Pattern[str]],
    report: "Report",
    stdin_filename: Optional[str],
) -> Set[Path]:
    """Compute the set of files to be formatted.

    Explicit file arguments are only filtered by --force-exclude; directory
    arguments are walked recursively with the full include/exclude/gitignore
    machinery. "-" stands for stdin.
    """
    sources: Set[Path] = set()
    root = ctx.obj["root"]

    using_default_exclude = exclude is None
    exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
    # Maps a directory to the gitignore spec that applies beneath it; only
    # populated when the user did not override --exclude.
    gitignore: Optional[Dict[Path, PathSpec]] = None
    root_gitignore = get_gitignore(root)

    for s in src:
        if s == "-" and stdin_filename:
            p = Path(stdin_filename)
            is_stdin = True
        else:
            p = Path(s)
            is_stdin = False

        if is_stdin or p.is_file():
            normalized_path = normalize_path_maybe_ignore(p, ctx.obj["root"], report)
            if normalized_path is None:
                continue

            # Leading "/" so the regex can anchor on the root-relative path.
            normalized_path = "/" + normalized_path
            # Hard-exclude any files that matches the `--force-exclude` regex.
            if force_exclude:
                force_exclude_match = force_exclude.search(normalized_path)
            else:
                force_exclude_match = None
            if force_exclude_match and force_exclude_match.group(0):
                report.path_ignored(p, "matches the --force-exclude regular expression")
                continue

            if is_stdin:
                # Tag the path so reformat_one knows to read from stdin while
                # still remembering the original name.
                p = Path(f"{STDIN_PLACEHOLDER}{str(p)}")

            if p.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
                verbose=verbose, quiet=quiet
            ):
                continue

            sources.add(p)
        elif p.is_dir():
            if using_default_exclude:
                gitignore = {
                    root: root_gitignore,
                    root / p: get_gitignore(p),
                }
            sources.update(
                gen_python_files(
                    p.iterdir(),
                    ctx.obj["root"],
                    include,
                    exclude,
                    extend_exclude,
                    force_exclude,
                    report,
                    gitignore,
                    verbose=verbose,
                    quiet=quiet,
                )
            )
        elif s == "-":
            # Bare stdin without --stdin-filename.
            sources.add(p)
        else:
            err(f"invalid path: {s}")
    return sources
def path_empty(
    src: Sized, msg: str, quiet: bool, verbose: bool, ctx: click.Context
) -> None:
    """Exit successfully (printing `msg` unless quiet) when `src` is empty."""
    if src:
        return
    if verbose or not quiet:
        out(msg)
    ctx.exit(0)
def reformat_code(
    content: str, fast: bool, write_back: WriteBack, mode: Mode, report: Report
) -> None:
    """
    Reformat and print out `content` without spawning child processes.
    Similar to `reformat_one`, but for string content.

    `fast`, `write_back`, and `mode` options are passed to
    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
    """
    # The pseudo-path shown in the report for inline code.
    path = Path("<string>")
    try:
        was_formatted = format_stdin_to_stdout(
            content=content, fast=fast, write_back=write_back, mode=mode
        )
        report.done(path, Changed.YES if was_formatted else Changed.NO)
    except Exception as exc:
        if report.verbose:
            traceback.print_exc()
        report.failed(path, str(exc))
# diff-shades depends on being to monkeypatch this function to operate. I know it's
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
@mypyc_attr(patchable=True)
def reformat_one(
    src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
) -> None:
    """Reformat a single file under `src` without spawning child processes.

    `fast`, `write_back`, and `mode` options are passed to
    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
    """
    try:
        changed = Changed.NO

        # "-" or an STDIN_PLACEHOLDER-tagged path means: read from stdin.
        if str(src) == "-":
            is_stdin = True
        elif str(src).startswith(STDIN_PLACEHOLDER):
            is_stdin = True
            # Use the original name again in case we want to print something
            # to the user
            src = Path(str(src)[len(STDIN_PLACEHOLDER) :])
        else:
            is_stdin = False

        if is_stdin:
            if src.suffix == ".pyi":
                mode = replace(mode, is_pyi=True)
            elif src.suffix == ".ipynb":
                mode = replace(mode, is_ipynb=True)
            if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
                changed = Changed.YES
        else:
            # On-disk files go through the mtime/size cache so unchanged
            # files are skipped; diff modes bypass the cache entirely.
            cache: Cache = {}
            if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
                cache = read_cache(mode)
                res_src = src.resolve()
                res_src_s = str(res_src)
                if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src):
                    changed = Changed.CACHED
            if changed is not Changed.CACHED and format_file_in_place(
                src, fast=fast, write_back=write_back, mode=mode
            ):
                changed = Changed.YES
            # Record the file as up to date once it has been written (YES) or
            # verified unchanged (CHECK).
            if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
                write_back is WriteBack.CHECK and changed is Changed.NO
            ):
                write_cache(cache, [src], mode)
        report.done(src, changed)
    except Exception as exc:
        if report.verbose:
            traceback.print_exc()
        report.failed(src, str(exc))
def format_file_in_place(
    src: Path,
    fast: bool,
    mode: Mode,
    write_back: WriteBack = WriteBack.NO,
    lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
) -> bool:
    """Format file under `src` path. Return True if changed.

    If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
    code to the file.
    `mode` and `fast` options are passed to :func:`format_file_contents`.
    """
    # Bug fix: `nullcontext` is used below when no `lock` is supplied, but the
    # module only imports `contextmanager` from contextlib, so the diff paths
    # raised NameError. Import it locally to keep the fix self-contained.
    from contextlib import nullcontext

    # Infer the input type from the extension (can be overridden via `mode`).
    if src.suffix == ".pyi":
        mode = replace(mode, is_pyi=True)
    elif src.suffix == ".ipynb":
        mode = replace(mode, is_ipynb=True)

    # NOTE(review): datetime.utcfromtimestamp is deprecated since Python 3.12;
    # kept as-is here to preserve the exact diff-header timestamp format.
    then = datetime.utcfromtimestamp(src.stat().st_mtime)
    header = b""
    with open(src, "rb") as buf:
        if mode.skip_source_first_line:
            # Preserve the first line (e.g. a shebang-like prefix) verbatim.
            header = buf.readline()
        src_contents, encoding, newline = decode_bytes(buf.read())
    try:
        dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
    except NothingChanged:
        return False
    except JSONDecodeError:
        raise ValueError(
            f"File '{src}' cannot be parsed as valid Jupyter notebook."
        ) from None
    src_contents = header.decode(encoding) + src_contents
    dst_contents = header.decode(encoding) + dst_contents

    if write_back == WriteBack.YES:
        with open(src, "w", encoding=encoding, newline=newline) as f:
            f.write(dst_contents)
    elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        now = datetime.utcnow()
        src_name = f"{src}\t{then} +0000"
        dst_name = f"{src}\t{now} +0000"
        if mode.is_ipynb:
            diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name)
        else:
            diff_contents = diff(src_contents, dst_contents, src_name, dst_name)

        if write_back == WriteBack.COLOR_DIFF:
            diff_contents = color_diff(diff_contents)

        # Serialize stdout writes when formatting in parallel workers.
        with lock or nullcontext():
            f = io.TextIOWrapper(
                sys.stdout.buffer,
                encoding=encoding,
                newline=newline,
                write_through=True,
            )
            f = wrap_stream_for_windows(f)
            f.write(diff_contents)
            # Detach so closing the wrapper doesn't close sys.stdout itself.
            f.detach()

    return True
def format_stdin_to_stdout(
    fast: bool,
    *,
    content: Optional[str] = None,
    write_back: WriteBack = WriteBack.NO,
    mode: Mode,
) -> bool:
    """Format file on stdin. Return True if changed.

    If content is None, it's read from sys.stdin.

    If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
    write a diff to stdout. The `mode` argument is passed to
    :func:`format_file_contents`.
    """
    then = datetime.utcnow()

    if content is None:
        src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
    else:
        src, encoding, newline = content, "utf-8", ""

    dst = src
    try:
        dst = format_file_contents(src, fast=fast, mode=mode)
        return True

    except NothingChanged:
        return False

    finally:
        # Output happens in `finally` so the (possibly unchanged) result is
        # written to stdout regardless of which branch returned above.
        f = io.TextIOWrapper(
            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
        )
        if write_back == WriteBack.YES:
            # Make sure there's a newline after the content
            if dst and dst[-1] != "\n":
                dst += "\n"
            f.write(dst)
        elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
            now = datetime.utcnow()
            src_name = f"STDIN\t{then} +0000"
            dst_name = f"STDOUT\t{now} +0000"
            d = diff(src, dst, src_name, dst_name)
            if write_back == WriteBack.COLOR_DIFF:
                d = color_diff(d)
                f = wrap_stream_for_windows(f)
            f.write(d)
        # Detach so closing the wrapper doesn't close sys.stdout itself.
        f.detach()
def check_stability_and_equivalence(
    src_contents: str, dst_contents: str, *, mode: Mode
) -> None:
    """Perform stability and equivalence checks.

    Raise AssertionError if source and destination contents are not
    equivalent, or if a second pass of the formatter would format the
    content differently.
    """
    # Equivalence: the output must parse to the same AST as the input.
    assert_equivalent(src_contents, dst_contents)
    # Stability: formatting the output again must be a no-op.
    assert_stable(src_contents, dst_contents, mode=mode)
def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
    """Reformat `src_contents` and return the new file content.

    Raises NothingChanged when the input is effectively empty (outside preview
    mode) or when formatting produces identical output. Unless `fast` is True,
    the result is additionally validated via
    :func:`check_stability_and_equivalence`; notebooks skip that step here
    because their cells are checked individually during formatting.
    """
    is_blank = not src_contents.strip()
    if is_blank and not mode.preview:
        raise NothingChanged

    if mode.is_ipynb:
        dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
    else:
        dst_contents = format_str(src_contents, mode=mode)

    if dst_contents == src_contents:
        raise NothingChanged

    should_verify = not fast and not mode.is_ipynb
    if should_verify:
        # Jupyter notebook cells were already verified cell-by-cell above.
        check_stability_and_equivalence(src_contents, dst_contents, mode=mode)
    return dst_contents
def validate_cell(src: str, mode: Mode) -> None:
    """Reject notebook cells which cannot be round-tripped safely.

    A cell that already contains a TransformerManager-style transformation
    (e.g. a literal ``get_ipython().system('ls')``) is ambiguous: ``!ls`` is
    transformed to the exact same text, so after masking and unmasking we
    could not reconstruct the original. A cell whose ``%%`` cell magic is not
    a known Python magic may contain non-Python source entirely. Both cases
    raise NothingChanged so the cell is left untouched.
    """
    for transformed_magic in TRANSFORMED_MAGICS:
        if transformed_magic in src:
            raise NothingChanged
    if src.startswith("%%"):
        cell_magic_name = src.split()[0][2:]
        if cell_magic_name not in PYTHON_CELL_MAGICS | mode.python_cell_magics:
            raise NothingChanged
def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
    """Format code in given cell of Jupyter notebook.

    General idea is:
      - if cell has trailing semicolon, remove it;
      - if cell has IPython magics, mask them;
      - format cell;
      - reinstate IPython magics;
      - reinstate trailing semicolon (if originally present);
      - strip trailing newlines.

    Cells with syntax errors will not be processed, as they
    could potentially be automagics or multi-line magics, which
    are currently not supported.

    Raises NothingChanged if the cell is skipped or already formatted.
    """
    validate_cell(src, mode)
    src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon(
        src
    )
    try:
        masked_src, replacements = mask_cell(src_without_trailing_semicolon)
    except SyntaxError:
        # Unparseable: could be an automagic or multi-line magic — leave it be.
        raise NothingChanged from None
    masked_dst = format_str(masked_src, mode=mode)
    if not fast:
        # Safety checks run on the *masked* source so magics don't confuse them.
        check_stability_and_equivalence(masked_src, masked_dst, mode=mode)
    dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements)
    dst = put_trailing_semicolon_back(
        dst_without_trailing_semicolon, has_trailing_semicolon
    )
    dst = dst.rstrip("\n")
    if dst == src:
        raise NothingChanged from None
    return dst
def validate_metadata(nb: MutableMapping[str, Any]) -> None:
    """Raise NothingChanged if the notebook declares a non-Python language.

    All notebook metadata fields are optional, see
    https://nbformat.readthedocs.io/en/latest/format_description.html.  A
    notebook with missing or empty metadata is therefore still treated as
    Python and parsed anyway.
    """
    language_info = nb.get("metadata", {}).get("language_info", {})
    language = language_info.get("name", None)
    if language is None:
        return
    if language != "python":
        raise NothingChanged from None
def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
    """Format Jupyter notebook.

    Operate cell-by-cell, only on code cells, only for Python notebooks.
    If the ``.ipynb`` originally had a trailing newline, it'll be preserved.

    Raises:
        NothingChanged: if the input is empty, the notebook is non-Python,
            or no code cell required reformatting.
    """
    if not src_contents:
        # Guard unconditionally (previously only under mode.preview): indexing
        # src_contents[-1] below raises IndexError on an empty string, and ""
        # is not valid JSON anyway, so there is nothing to format.
        raise NothingChanged

    trailing_newline = src_contents[-1] == "\n"
    modified = False
    nb = json.loads(src_contents)
    validate_metadata(nb)
    for cell in nb["cells"]:
        if cell.get("cell_type", None) == "code":
            try:
                src = "".join(cell["source"])
                dst = format_cell(src, fast=fast, mode=mode)
            except NothingChanged:
                # Cell was skipped or already formatted; keep it as-is.
                pass
            else:
                cell["source"] = dst.splitlines(keepends=True)
                modified = True
    if modified:
        dst_contents = json.dumps(nb, indent=1, ensure_ascii=False)
        if trailing_newline:
            dst_contents = dst_contents + "\n"
        return dst_contents
    else:
        raise NothingChanged
def format_str(src_contents: str, *, mode: Mode) -> str:
    """Reformat a string and return new contents.

    `mode` determines formatting options, such as how many characters per line are
    allowed.  Example:

    >>> import black
    >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode()))
    def f(arg: str = "") -> None:
        ...

    A more complex example:

    >>> print(
    ...   black.format_str(
    ...     "def f(arg:str='')->None: hey",
    ...     mode=black.Mode(
    ...       target_versions={black.TargetVersion.PY36},
    ...       line_length=10,
    ...       string_normalization=False,
    ...       is_pyi=False,
    ...     ),
    ...   ),
    ... )
    def f(
        arg: str = '',
    ) -> None:
        hey

    """
    dst_contents = _format_str_once(src_contents, mode=mode)
    # Forced second pass to work around optional trailing commas (becoming
    # forced trailing commas on pass 2) interacting differently with optional
    # parentheses.  Admittedly ugly.
    if src_contents != dst_contents:
        return _format_str_once(dst_contents, mode=mode)
    return dst_contents
def _format_str_once(src_contents: str, *, mode: Mode) -> str:
    """Run a single formatting pass over `src_contents`.

    Helper for `format_str`, which may call this twice to reach a fixed
    point; it does not perform the stability re-run itself.
    """
    src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
    dst_blocks: List[LinesBlock] = []
    if mode.target_versions:
        versions = mode.target_versions
    else:
        # No explicit targets: infer them from the syntax features actually used.
        future_imports = get_future_imports(src_node)
        versions = detect_target_versions(src_node, future_imports=future_imports)

    normalize_fmt_off(src_node, preview=mode.preview)
    lines = LineGenerator(mode=mode)
    elt = EmptyLineTracker(mode=mode)
    # Magic trailing commas may only be exploited if every target version
    # supports them in the relevant position.
    split_line_features = {
        feature
        for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
        if supports_feature(versions, feature)
    }
    block: Optional[LinesBlock] = None
    for current_line in lines.visit(src_node):
        block = elt.maybe_empty_lines(current_line)
        dst_blocks.append(block)
        for line in transform_line(
            current_line, mode=mode, features=split_line_features
        ):
            block.content_lines.append(str(line))
    if dst_blocks:
        # Never emit trailing empty lines after the last block.
        dst_blocks[-1].after = 0
    dst_contents = []
    for block in dst_blocks:
        dst_contents.extend(block.all_lines())
    if mode.preview and not dst_contents:
        # Use decode_bytes to retrieve the correct source newline (CRLF or LF),
        # and check if normalized_content has more than one line
        normalized_content, _, newline = decode_bytes(src_contents.encode("utf-8"))
        if "\n" in normalized_content:
            return newline
        return ""
    return "".join(dst_contents)
def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
    """Return a tuple of (decoded_contents, encoding, newline).

    `newline` is either CRLF or LF but `decoded_contents` is decoded with
    universal newlines (i.e. only contains LF).
    """
    buffer = io.BytesIO(src)
    encoding, first_lines = tokenize.detect_encoding(buffer.readline)
    if not first_lines:
        # Empty input: default encoding, assume LF.
        return "", encoding, "\n"

    newline = "\r\n" if first_lines[0].endswith(b"\r\n") else "\n"
    buffer.seek(0)
    with io.TextIOWrapper(buffer, encoding) as wrapper:
        return wrapper.read(), encoding, newline
def get_features_used(  # noqa: C901
    node: Node, *, future_imports: Optional[Set[str]] = None
) -> Set[Feature]:
    """Return a set of (relatively) new Python features used in this file.

    Currently looking for:
    - f-strings;
    - self-documenting expressions in f-strings (f"{x=}");
    - underscores in numeric literals;
    - trailing commas after * or ** in function signatures and calls;
    - positional only arguments in function signatures and lambdas;
    - assignment expression;
    - relaxed decorator syntax;
    - usage of __future__ flags (annotations);
    - print / exec statements;
    """
    features: Set[Feature] = set()
    if future_imports:
        # __future__ flags map straight to features (when known).
        features |= {
            FUTURE_FLAG_TO_FEATURE[future_import]
            for future_import in future_imports
            if future_import in FUTURE_FLAG_TO_FEATURE
        }

    for n in node.pre_order():
        if is_string_token(n):
            value_head = n.value[:2]
            if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
                features.add(Feature.F_STRINGS)
                if Feature.DEBUG_F_STRINGS not in features:
                    # f"{x=}": an "=" right before the closing brace of an
                    # f-expression marks a self-documenting expression.
                    for span_beg, span_end in iter_fexpr_spans(n.value):
                        if n.value[span_beg : span_end - 1].rstrip().endswith("="):
                            features.add(Feature.DEBUG_F_STRINGS)
                            break

        elif is_number_token(n):
            if "_" in n.value:
                features.add(Feature.NUMERIC_UNDERSCORES)

        elif n.type == token.SLASH:
            # A bare "/" inside an argument list is a positional-only marker.
            if n.parent and n.parent.type in {
                syms.typedargslist,
                syms.arglist,
                syms.varargslist,
            }:
                features.add(Feature.POS_ONLY_ARGUMENTS)

        elif n.type == token.COLONEQUAL:
            features.add(Feature.ASSIGNMENT_EXPRESSIONS)

        elif n.type == syms.decorator:
            if len(n.children) > 1 and not is_simple_decorator_expression(
                n.children[1]
            ):
                features.add(Feature.RELAXED_DECORATORS)

        elif (
            n.type in {syms.typedargslist, syms.arglist}
            and n.children
            and n.children[-1].type == token.COMMA
        ):
            if n.type == syms.typedargslist:
                feature = Feature.TRAILING_COMMA_IN_DEF
            else:
                feature = Feature.TRAILING_COMMA_IN_CALL

            # Only counts when a * or ** argument appears before the trailing
            # comma (directly or wrapped in an `argument` node).
            for ch in n.children:
                if ch.type in STARS:
                    features.add(feature)

                if ch.type == syms.argument:
                    for argch in ch.children:
                        if argch.type in STARS:
                            features.add(feature)

        elif (
            n.type in {syms.return_stmt, syms.yield_expr}
            and len(n.children) >= 2
            and n.children[1].type == syms.testlist_star_expr
            and any(child.type == syms.star_expr for child in n.children[1].children)
        ):
            features.add(Feature.UNPACKING_ON_FLOW)

        elif (
            n.type == syms.annassign
            and len(n.children) >= 4
            and n.children[3].type == syms.testlist_star_expr
        ):
            features.add(Feature.ANN_ASSIGN_EXTENDED_RHS)

        elif (
            n.type == syms.except_clause
            and len(n.children) >= 2
            and n.children[1].type == token.STAR
        ):
            features.add(Feature.EXCEPT_STAR)

        elif n.type in {syms.subscriptlist, syms.trailer} and any(
            child.type == syms.star_expr for child in n.children
        ):
            features.add(Feature.VARIADIC_GENERICS)

        elif (
            n.type == syms.tname_star
            and len(n.children) == 3
            and n.children[2].type == syms.star_expr
        ):
            features.add(Feature.VARIADIC_GENERICS)

    return features
def detect_target_versions(
    node: Node, *, future_imports: Optional[Set[str]] = None
) -> Set[TargetVersion]:
    """Detect the version to target based on the nodes used."""
    used_features = get_features_used(node, future_imports=future_imports)
    compatible_versions = set()
    for version in TargetVersion:
        # A version is compatible iff it supports every feature in use.
        if used_features <= VERSION_TO_FEATURES[version]:
            compatible_versions.add(version)
    return compatible_versions
def get_future_imports(node: Node) -> Set[str]:
    """Return a set of __future__ imports in the file.

    Only leading `from __future__ import ...` statements are considered —
    scanning stops at the first statement that is neither a docstring nor a
    __future__ import (matching Python's own rules for where they may appear).
    """
    imports: Set[str] = set()

    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
        # Yield imported names from the children of an import_from node,
        # unwrapping `name as alias` to the original name.
        for child in children:
            if isinstance(child, Leaf):
                if child.type == token.NAME:
                    yield child.value

            elif child.type == syms.import_as_name:
                orig_name = child.children[0]
                assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
                assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
                yield orig_name.value

            elif child.type == syms.import_as_names:
                yield from get_imports_from_children(child.children)

            else:
                raise AssertionError("Invalid syntax parsing imports")

    for child in node.children:
        if child.type != syms.simple_stmt:
            break

        first_child = child.children[0]
        if isinstance(first_child, Leaf):
            # Continue looking if we see a docstring; otherwise stop.
            if (
                len(child.children) == 2
                and first_child.type == token.STRING
                and child.children[1].type == token.NEWLINE
            ):
                continue

            break

        elif first_child.type == syms.import_from:
            module_name = first_child.children[1]
            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
                break

            imports |= set(get_imports_from_children(first_child.children[3:]))
        else:
            break

    return imports
def assert_equivalent(src: str, dst: str) -> None:
    """Raise AssertionError if `src` and `dst` aren't equivalent.

    Equivalence is judged by comparing stringified ASTs of both sides; on a
    mismatch the AST diff is dumped to a file and referenced in the error.
    """
    try:
        src_ast = parse_ast(src)
    except Exception as exc:
        # The *source* failing to parse is a user-environment problem, not a
        # Black bug, hence the different error message.
        raise AssertionError(
            "cannot use --safe with this file; failed to parse source file AST: "
            f"{exc}\n"
            "This could be caused by running Black with an older Python version "
            "that does not support new syntax used in your source file."
        ) from exc

    try:
        dst_ast = parse_ast(dst)
    except Exception as exc:
        # Black produced unparseable output: dump the traceback + output for
        # the bug report.
        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
        raise AssertionError(
            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
            "Please report a bug on https://github.com/psf/black/issues. "
            f"This invalid output might be helpful: {log}"
        ) from None

    src_ast_str = "\n".join(stringify_ast(src_ast))
    dst_ast_str = "\n".join(stringify_ast(dst_ast))
    if src_ast_str != dst_ast_str:
        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
        raise AssertionError(
            "INTERNAL ERROR: Black produced code that is not equivalent to the"
            " source. Please report a bug on "
            f"https://github.com/psf/black/issues. This diff might be helpful: {log}"
        ) from None
def assert_stable(src: str, dst: str, mode: Mode) -> None:
    """Raise AssertionError if `dst` reformats differently the second time."""
    # We shouldn't call format_str() here, because that formats the string
    # twice and may hide a bug where we bounce back and forth between two
    # versions.
    newdst = _format_str_once(dst, mode=mode)
    if dst != newdst:
        # Dump both diffs (source→first pass, first→second pass) so the bug
        # report shows where the instability was introduced.
        log = dump_to_file(
            str(mode),
            diff(src, dst, "source", "first pass"),
            diff(dst, newdst, "first pass", "second pass"),
        )
        raise AssertionError(
            "INTERNAL ERROR: Black produced different code on the second pass of the"
            " formatter. Please report a bug on https://github.com/psf/black/issues."
            f" This diff might be helpful: {log}"
        ) from None
@contextmanager
def nullcontext() -> Iterator[None]:
    """A do-nothing context manager.

    Stand-in for `contextlib.nullcontext` (added in Python 3.7).
    """
    yield None
def patch_click() -> None:
    """Make Click not crash on Python 3.6 with LANG=C.

    On certain misconfigured environments, Python 3 selects the ASCII encoding
    as the default, which restricts the paths Click is willing to touch and
    makes it refuse to run by raising a RuntimeError.  Black only ever deals
    with Python source files, so non-ASCII paths are unlikely, and the crash
    was spurious on Python 3.7 anyway thanks to PEP 538 and PEP 540 — so the
    environment check is neutralized here.
    """
    patched_modules: List[Any] = []
    try:
        from click import core
    except ImportError:
        pass
    else:
        patched_modules.append(core)
    try:
        # Removed in Click 8.1.0 and newer; we keep this around for users who
        # have older versions installed.
        from click import _unicodefun  # type: ignore
    except ImportError:
        pass
    else:
        patched_modules.append(_unicodefun)

    for module in patched_modules:
        for checker_name in ("_verify_python3_env", "_verify_python_env"):
            if hasattr(module, checker_name):
                setattr(module, checker_name, lambda: None)
def patched_main() -> None:
    """Entry point wrapper: applies environment workarounds, then runs main()."""
    # PyInstaller patches multiprocessing to need freeze_support() even in
    # non-Windows environments so just assume we always need to call it if
    # frozen.
    if getattr(sys, "frozen", False):
        from multiprocessing import freeze_support

        freeze_support()

    patch_click()
    main()
# Script entry point: go through patched_main so the frozen-binary and Click
# workarounds are applied before main() runs.
if __name__ == "__main__":
    patched_main()
| mit | 15a44675e7b1ddb29c7816988e8aa77b | 30.998591 | 88 | 0.595582 | 3.907237 | false | false | false | false |
psf/black | src/black/report.py | 1 | 3451 | """
Summarize Black runs to users.
"""
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from click import style
from black.output import err, out
class Changed(Enum):
    """Outcome of processing a single file (consumed by `Report.done`)."""

    NO = 0  # file was already well formatted
    CACHED = 1  # file skipped: not modified on disk since the last run
    YES = 2  # file was (or, under --check/--diff, would be) reformatted
class NothingChanged(UserWarning):
    """Raised when reformatted code is the same as source.

    Used as a control-flow signal by the formatting entry points rather than
    as an error condition.
    """
@dataclass
class Report:
    """Provides a reformatting counter. Can be rendered with `str(report)`."""

    check: bool = False  # --check: phrase output as "would reformat"
    diff: bool = False  # --diff: same conditional phrasing as --check
    quiet: bool = False  # suppress per-file "reformatted" messages
    verbose: bool = False  # emit messages for unchanged/ignored files too
    change_count: int = 0  # files (that would be) reformatted
    same_count: int = 0  # files left unchanged (including cache hits)
    failure_count: int = 0  # files that failed to reformat

    def done(self, src: Path, changed: Changed) -> None:
        """Increment the counter for successful reformatting. Write out a message."""
        if changed is Changed.YES:
            reformatted = "would reformat" if self.check or self.diff else "reformatted"
            if self.verbose or not self.quiet:
                out(f"{reformatted} {src}")
            self.change_count += 1
        else:
            if self.verbose:
                if changed is Changed.NO:
                    msg = f"{src} already well formatted, good job."
                else:
                    msg = f"{src} wasn't modified on disk since last run."
                out(msg, bold=False)
            self.same_count += 1

    def failed(self, src: Path, message: str) -> None:
        """Increment the counter for failed reformatting. Write out a message."""
        err(f"error: cannot format {src}: {message}")
        self.failure_count += 1

    def path_ignored(self, path: Path, message: str) -> None:
        """Report a skipped path; only visible in verbose mode, never counted."""
        if self.verbose:
            out(f"{path} ignored: {message}", bold=False)

    @property
    def return_code(self) -> int:
        """Return the exit code that the app should use.

        This considers the current state of changed files and failures:
        - if there were any failures, return 123;
        - if any files were changed and --check is being used, return 1;
        - otherwise return 0.
        """
        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
        # 126 we have special return codes reserved by the shell.
        if self.failure_count:
            return 123

        elif self.change_count and self.check:
            return 1

        return 0

    def __str__(self) -> str:
        """Render a color report of the current state.

        Use `click.unstyle` to remove colors.
        """
        if self.check or self.diff:
            reformatted = "would be reformatted"
            unchanged = "would be left unchanged"
            failed = "would fail to reformat"
        else:
            reformatted = "reformatted"
            unchanged = "left unchanged"
            failed = "failed to reformat"
        report = []
        if self.change_count:
            s = "s" if self.change_count > 1 else ""
            report.append(
                style(f"{self.change_count} file{s} ", bold=True, fg="blue")
                + style(f"{reformatted}", bold=True)
            )
        if self.same_count:
            s = "s" if self.same_count > 1 else ""
            report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged)
        if self.failure_count:
            s = "s" if self.failure_count > 1 else ""
            report.append(style(f"{self.failure_count} file{s} {failed}", fg="red"))
        return ", ".join(report) + "."
| mit | 3463b8f861795ca5967ea9700e3b742f | 31.556604 | 88 | 0.572877 | 4.084024 | false | false | false | false |
psf/black | src/black/trans.py | 1 | 82244 | """
String transformers that can split and merge strings.
"""
import re
import sys
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import (
Any,
Callable,
ClassVar,
Collection,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
if sys.version_info < (3, 8):
from typing_extensions import Final, Literal
else:
from typing import Literal, Final
from mypy_extensions import trait
from black.brackets import BracketMatchError
from black.comments import contains_pragma_comment
from black.lines import Line, append_leaves
from black.mode import Feature
from black.nodes import (
CLOSING_BRACKETS,
OPENING_BRACKETS,
STANDALONE_COMMENT,
is_empty_lpar,
is_empty_par,
is_empty_rpar,
parent_type,
replace_child,
syms,
)
from black.rusty import Err, Ok, Result
from black.strings import (
assert_is_leaf_string,
get_string_prefix,
has_triple_quotes,
normalize_string_quotes,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
class CannotTransform(Exception):
    """Base class for errors raised by Transformers.

    Signals that a transformer does not apply to a given line (see
    `StringTransformer.__call__`); it is expected control flow, not a crash.
    """
# types
T = TypeVar("T")
LN = Union[Leaf, Node]  # any lib2to3 tree node (leaf or interior)
Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]]
Index = int  # index into a Line's list of leaves
NodeType = int
ParserState = int
StringID = int  # id() of a string object (see CustomSplitMapMixin._get_key)
TResult = Result[T, CannotTransform]  # (T)ransform Result
TMatchResult = TResult[Index]
def TErr(err_msg: str) -> Err[CannotTransform]:
    """(T)ransform Err

    Convenience function used when working with the TResult type: wraps the
    message in a CannotTransform and returns it as an Err.
    """
    return Err(CannotTransform(err_msg))
def hug_power_op(line: Line, features: Collection[Feature]) -> Iterator[Line]:
    """A transformer which normalizes spacing around power operators.

    Strips the whitespace around `**` when both operands are "simple" (plain
    names, numbers, dotted lookups, optionally with a unary prefix).
    Raises CannotTransform when the line contains no `**` at all.
    """
    # Performance optimization to avoid unnecessary Leaf clones and other ops.
    for leaf in line.leaves:
        if leaf.type == token.DOUBLESTAR:
            break
    else:
        raise CannotTransform("No doublestar token was found in the line.")

    def is_simple_lookup(index: int, step: Literal[1, -1]) -> bool:
        # Brackets and parentheses indicate calls, subscripts, etc. ...
        # basically stuff that doesn't count as "simple". Only a NAME lookup
        # or dotted lookup (eg. NAME.NAME) is OK.
        if step == -1:
            disallowed = {token.RPAR, token.RSQB}
        else:
            disallowed = {token.LPAR, token.LSQB}

        while 0 <= index < len(line.leaves):
            current = line.leaves[index]
            if current.type in disallowed:
                return False
            if current.type not in {token.NAME, token.DOT} or current.value == "for":
                # If the current token isn't disallowed, we'll assume this is
                # simple as only the disallowed tokens are semantically
                # attached to this lookup expression we're checking. Also,
                # stop early if we hit the 'for' bit of a comprehension.
                return True

            index += step

        return True

    def is_simple_operand(index: int, kind: Literal["base", "exponent"]) -> bool:
        # An operand is considered "simple" if's a NAME, a numeric CONSTANT,
        # a simple lookup (see above), with or without a preceding unary
        # operator.
        start = line.leaves[index]
        if start.type in {token.NAME, token.NUMBER}:
            return is_simple_lookup(index, step=(1 if kind == "exponent" else -1))

        if start.type in {token.PLUS, token.MINUS, token.TILDE}:
            if line.leaves[index + 1].type in {token.NAME, token.NUMBER}:
                # step is always one as bases with a preceding unary op will
                # be checked for simplicity starting from the next token (so
                # it'll hit the check above).
                return is_simple_lookup(index + 1, step=1)

        return False

    new_line = line.clone()
    should_hug = False
    for idx, leaf in enumerate(line.leaves):
        new_leaf = leaf.clone()
        if should_hug:
            # Previous leaf was a hugged `**`: strip this (exponent) leaf's prefix.
            new_leaf.prefix = ""
            should_hug = False

        should_hug = (
            (0 < idx < len(line.leaves) - 1)
            and leaf.type == token.DOUBLESTAR
            and is_simple_operand(idx - 1, kind="base")
            and line.leaves[idx - 1].value != "lambda"
            and is_simple_operand(idx + 1, kind="exponent")
        )
        if should_hug:
            new_leaf.prefix = ""

        # We have to be careful to make a new line properly:
        # - bracket related metadata must be maintained (handled by Line.append)
        # - comments need to copied over, updating the leaf IDs they're attached to
        new_line.append(new_leaf, preformatted=True)
        for comment_leaf in line.comments_after(leaf):
            new_line.append(comment_leaf, preformatted=True)

    yield new_line
class StringTransformer(ABC):
    """
    An implementation of the Transformer protocol that relies on its
    subclasses overriding the template methods `do_match(...)` and
    `do_transform(...)`.

    This Transformer works exclusively on strings (for example, by merging
    or splitting them).

    The following sections can be found among the docstrings of each concrete
    StringTransformer subclass.

    Requirements:
        Which requirements must be met of the given Line for this
        StringTransformer to be applied?

    Transformations:
        If the given Line meets all of the above requirements, which string
        transformations can you expect to be applied to it by this
        StringTransformer?

    Collaborations:
        What contractual agreements does this StringTransformer have with other
        StringTransfomers? Such collaborations should be eliminated/minimized
        as much as possible.
    """

    __name__: Final = "StringTransformer"

    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
    # `abc.ABC`.
    def __init__(self, line_length: int, normalize_strings: bool) -> None:
        self.line_length = line_length
        self.normalize_strings = normalize_strings

    @abstractmethod
    def do_match(self, line: Line) -> TMatchResult:
        """
        Returns:
            * Ok(string_idx) such that `line.leaves[string_idx]` is our target
              string, if a match was able to be made.
                OR
            * Err(CannotTransform), if a match was not able to be made.
        """

    @abstractmethod
    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """
        Yields:
            * Ok(new_line) where new_line is the new transformed line.
                OR
            * Err(CannotTransform) if the transformation failed for some
              reason. The `do_match(...)` template method should usually be
              used to reject the form of the given Line, but in some cases it
              is difficult to know whether or not a Line meets the
              StringTransformer's requirements until the transformation is
              already midway.

        Side Effects:
            This method should NOT mutate @line directly, but it MAY mutate the
            Line's underlying Node structure. (WARNING: If the underlying Node
            structure IS altered, then this method should NOT be allowed to
            yield an CannotTransform after that point.)
        """

    def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
        """
        StringTransformer instances have a call signature that mirrors that of
        the Transformer type.

        Raises:
            CannotTransform(...) if the concrete StringTransformer class is
            unable to transform @line.
        """
        # Optimization to avoid calling `self.do_match(...)` when the line does
        # not contain any string.
        if not any(leaf.type == token.STRING for leaf in line.leaves):
            raise CannotTransform("There are no strings in this line.")

        match_result = self.do_match(line)

        if isinstance(match_result, Err):
            cant_transform = match_result.err()
            raise CannotTransform(
                f"The string transformer {self.__class__.__name__} does not recognize"
                " this line as one that it can transform."
            ) from cant_transform

        string_idx = match_result.ok()

        for line_result in self.do_transform(line, string_idx):
            if isinstance(line_result, Err):
                cant_transform = line_result.err()
                raise CannotTransform(
                    "StringTransformer failed while attempting to transform string."
                ) from cant_transform
            line = line_result.ok()
            yield line
@dataclass
class CustomSplit:
    """A custom (i.e. manual) string split.

    A single CustomSplit instance represents a single substring.

    Examples:
        Consider the following string:
        ```
        "Hi there friend."
        " This is a custom"
        f" string {split}."
        ```

        This string will correspond to the following three CustomSplit instances:
        ```
        CustomSplit(False, 16)
        CustomSplit(False, 17)
        CustomSplit(True, 16)
        ```
    """

    # True iff this substring carried its own string prefix (e.g. the 'f' above).
    has_prefix: bool
    # Break position within the merged string for this substring (see
    # StringMerger._merge_string_group, which computes it).
    break_idx: int
@trait
class CustomSplitMapMixin:
    """
    This mixin class is used to map merged strings to a sequence of
    CustomSplits, which will then be used to re-split the strings iff none of
    the resultant substrings go over the configured max line length.
    """

    _Key: ClassVar = Tuple[StringID, str]
    # ClassVar: a single map shared by every class using this mixin, so a
    # split registered by one transformer can be popped by another.
    _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
        tuple
    )

    @staticmethod
    def _get_key(string: str) -> "CustomSplitMapMixin._Key":
        """
        Returns:
            A unique identifier that is used internally to map @string to a
            group of custom splits.
        """
        return (id(string), string)

    def add_custom_splits(
        self, string: str, custom_splits: Iterable[CustomSplit]
    ) -> None:
        """Custom Split Map Setter Method

        Side Effects:
            Adds a mapping from @string to the custom splits @custom_splits.
        """
        key = self._get_key(string)
        self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

    def pop_custom_splits(self, string: str) -> List[CustomSplit]:
        """Custom Split Map Getter Method

        Returns:
            * A list of the custom splits that are mapped to @string, if any
              exist.
                OR
            * [], otherwise.

        Side Effects:
            Deletes the mapping between @string and its associated custom
            splits (which are returned to the caller).
        """
        key = self._get_key(string)

        # defaultdict: a missing key yields the empty tuple, which the `del`
        # below then removes again, so absent keys simply return [].
        custom_splits = self._CUSTOM_SPLIT_MAP[key]
        del self._CUSTOM_SPLIT_MAP[key]

        return list(custom_splits)

    def has_custom_splits(self, string: str) -> bool:
        """
        Returns:
            True iff @string is associated with a set of custom splits.
        """
        key = self._get_key(string)
        return key in self._CUSTOM_SPLIT_MAP
class StringMerger(StringTransformer, CustomSplitMapMixin):
"""StringTransformer that merges strings together.
Requirements:
(A) The line contains adjacent strings such that ALL of the validation checks
listed in StringMerger.__validate_msg(...)'s docstring pass.
OR
(B) The line contains a string which uses line continuation backslashes.
Transformations:
Depending on which of the two requirements above where met, either:
(A) The string group associated with the target string is merged.
OR
(B) All line-continuation backslashes are removed from the target string.
Collaborations:
StringMerger provides custom split information to StringSplitter.
"""
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
if (
leaf.type == token.STRING
and is_valid_index(i + 1)
and LL[i + 1].type == token.STRING
):
return Ok(i)
if leaf.type == token.STRING and "\\\n" in leaf.value:
return Ok(i)
return TErr("This line has no strings that need merging.")
    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """Apply both merger strategies; fail only if neither applied."""
        new_line = line

        rblc_result = self._remove_backslash_line_continuation_chars(
            new_line, string_idx
        )
        if isinstance(rblc_result, Ok):
            new_line = rblc_result.ok()

        # The merge runs on the (possibly) backslash-cleaned line.
        msg_result = self._merge_string_group(new_line, string_idx)
        if isinstance(msg_result, Ok):
            new_line = msg_result.ok()

        if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
            msg_cant_transform = msg_result.err()
            rblc_cant_transform = rblc_result.err()
            cant_transform = CannotTransform(
                "StringMerger failed to merge any strings in this line."
            )

            # Chain the errors together using `__cause__`.
            msg_cant_transform.__cause__ = rblc_cant_transform
            cant_transform.__cause__ = msg_cant_transform

            yield Err(cant_transform)
        else:
            yield Ok(new_line)
    @staticmethod
    def _remove_backslash_line_continuation_chars(
        line: Line, string_idx: int
    ) -> TResult[Line]:
        """
        Merge strings that were split across multiple lines using
        line-continuation backslashes.

        Returns:
            Ok(new_line), if @line contains backslash line-continuation
            characters.
                OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        string_leaf = LL[string_idx]
        # Triple-quoted strings keep their backslash continuations: removing
        # them there would change the string's value.
        if not (
            string_leaf.type == token.STRING
            and "\\\n" in string_leaf.value
            and not has_triple_quotes(string_leaf.value)
        ):
            return TErr(
                f"String leaf {string_leaf} does not contain any backslash line"
                " continuation characters."
            )

        new_line = line.clone()
        new_line.comments = line.comments.copy()
        append_leaves(new_line, line, LL)

        new_string_leaf = new_line.leaves[string_idx]
        new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")

        return Ok(new_line)
    def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]:
        """
        Merges string group (i.e. set of adjacent strings) where the first
        string in the group is `line.leaves[string_idx]`.

        Returns:
            Ok(new_line), if ALL of the validation checks found in
            __validate_msg(...) pass.
                OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        vresult = self._validate_msg(line, string_idx)
        if isinstance(vresult, Err):
            return vresult

        # If the string group is wrapped inside an Atom node, we must make sure
        # to later replace that Atom with our new (merged) string leaf.
        atom_node = LL[string_idx].parent

        # We will place BREAK_MARK in between every two substrings that we
        # merge. We will then later go through our final result and use the
        # various instances of BREAK_MARK we find to add the right values to
        # the custom split map.
        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"

        QUOTE = LL[string_idx].value[-1]

        def make_naked(string: str, string_prefix: str) -> str:
            """Strip @string (i.e. make it a "naked" string)

            Pre-conditions:
                * assert_is_leaf_string(@string)

            Returns:
                A string that is identical to @string except that
                @string_prefix has been stripped, the surrounding QUOTE
                characters have been removed, and any remaining QUOTE
                characters have been escaped.
            """
            assert_is_leaf_string(string)

            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
            naked_string = string[len(string_prefix) + 1 : -1]
            # Escape any QUOTE preceded by an even number of backslashes (i.e.
            # one that is not already escaped).
            naked_string = re.sub(
                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
            )
            return naked_string

        # Holds the CustomSplit objects that will later be added to the custom
        # split map.
        custom_splits = []

        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
        prefix_tracker = []

        # Sets the 'prefix' variable. This is the prefix that the final merged
        # string will have.
        next_str_idx = string_idx
        prefix = ""
        while (
            not prefix
            and is_valid_index(next_str_idx)
            and LL[next_str_idx].type == token.STRING
        ):
            prefix = get_string_prefix(LL[next_str_idx].value).lower()
            next_str_idx += 1

        # The next loop merges the string group. The final string will be
        # contained in 'S'.
        #
        # The following convenience variables are used:
        #
        #   S: string
        #   NS: naked string
        #   SS: next string
        #   NSS: naked next string
        S = ""
        NS = ""
        num_of_strings = 0
        next_str_idx = string_idx
        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
            num_of_strings += 1

            SS = LL[next_str_idx].value
            next_prefix = get_string_prefix(SS).lower()

            # If this is an f-string group but this substring is not prefixed
            # with 'f'...
            if "f" in prefix and "f" not in next_prefix:
                # Then we must escape any braces contained in this substring.
                SS = re.sub(r"(\{|\})", r"\1\1", SS)

            NSS = make_naked(SS, next_prefix)

            has_prefix = bool(next_prefix)
            prefix_tracker.append(has_prefix)

            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
            NS = make_naked(S, prefix)

            next_str_idx += 1

        # Take a note on the index of the non-STRING leaf.
        non_string_idx = next_str_idx

        S_leaf = Leaf(token.STRING, S)
        if self.normalize_strings:
            S_leaf.value = normalize_string_quotes(S_leaf.value)

        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
        temp_string = S_leaf.value[len(prefix) + 1 : -1]
        for has_prefix in prefix_tracker:
            mark_idx = temp_string.find(BREAK_MARK)
            assert (
                mark_idx >= 0
            ), "Logic error while filling the custom string breakpoint cache."

            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))

        # The final merged leaf, with all marker text removed.
        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))

        if atom_node is not None:
            # If not all children of the atom node are merged (this can happen
            # when there is a standalone comment in the middle) ...
            if non_string_idx - string_idx < len(atom_node.children):
                # We need to replace the old STRING leaves with the new string leaf.
                first_child_idx = LL[string_idx].remove()
                for idx in range(string_idx + 1, non_string_idx):
                    LL[idx].remove()
                if first_child_idx is not None:
                    atom_node.insert_child(first_child_idx, string_leaf)
            else:
                # Else replace the atom node with the new string leaf.
                replace_child(atom_node, string_leaf)

        # Build the final line ('new_line') that this method will later return.
        new_line = line.clone()
        for i, leaf in enumerate(LL):
            if i == string_idx:
                new_line.append(string_leaf)

            if string_idx <= i < string_idx + num_of_strings:
                # Carry comments from every merged leaf over onto the new line.
                for comment_leaf in line.comments_after(LL[i]):
                    new_line.append(comment_leaf, preformatted=True)
                continue

            append_leaves(new_line, line, [leaf])

        self.add_custom_splits(string_leaf.value, custom_splits)
        return Ok(new_line)
    @staticmethod
    def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
        """Validate (M)erge (S)tring (G)roup

        Transform-time string validation logic for __merge_string_group(...).

        Returns:
            * Ok(None), if ALL validation checks (listed below) pass.
                OR
            * Err(CannotTransform), if any of the following are true:
                - The target string group contains an "inner" stand-alone
                  comment (one with string leaves both before AND after it).
                - The target string is not in a string group (i.e. it has no
                  adjacent strings).
                - The string group has more than one inline comment.
                - The string group has an inline comment that appears to be a pragma.
                - The set of all string prefixes in the string group is of
                  length greater than one and is not equal to {"", "f"}.
                - The string group consists of raw strings.
        """
        # We first check for "inner" stand-alone comments (i.e. stand-alone
        # comments that have a string leaf before them AND after them).
        # We scan once forwards (inc == 1) and once backwards (inc == -1)
        # from the target string so a sandwiched comment on either side is found.
        for inc in [1, -1]:
            i = string_idx
            found_sa_comment = False
            is_valid_index = is_valid_index_factory(line.leaves)
            while is_valid_index(i) and line.leaves[i].type in [
                token.STRING,
                STANDALONE_COMMENT,
            ]:
                if line.leaves[i].type == STANDALONE_COMMENT:
                    found_sa_comment = True
                elif found_sa_comment:
                    # A string leaf seen AFTER a stand-alone comment (in scan
                    # order) means the comment sits inside the group; merging
                    # would displace it.
                    return TErr(
                        "StringMerger does NOT merge string groups which contain "
                        "stand-alone comments."
                    )

                i += inc

        # Second pass: walk the string group itself, tallying string count,
        # prefixes, and inline comments, and rejecting disallowed string kinds.
        num_of_inline_string_comments = 0
        set_of_prefixes = set()
        num_of_strings = 0
        for leaf in line.leaves[string_idx:]:
            if leaf.type != token.STRING:
                # If the string group is trailed by a comma, we count the
                # comments trailing the comma to be one of the string group's
                # comments.
                if leaf.type == token.COMMA and id(leaf) in line.comments:
                    num_of_inline_string_comments += 1
                break

            if has_triple_quotes(leaf.value):
                return TErr("StringMerger does NOT merge multiline strings.")

            num_of_strings += 1
            prefix = get_string_prefix(leaf.value).lower()
            if "r" in prefix:
                return TErr("StringMerger does NOT merge raw strings.")

            set_of_prefixes.add(prefix)

            if id(leaf) in line.comments:
                num_of_inline_string_comments += 1
                if contains_pragma_comment(line.comments[id(leaf)]):
                    return TErr("Cannot merge strings which have pragma comments.")

        if num_of_strings < 2:
            return TErr(
                f"Not enough strings to merge (num_of_strings={num_of_strings})."
            )

        if num_of_inline_string_comments > 1:
            return TErr(
                f"Too many inline string comments ({num_of_inline_string_comments})."
            )

        if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
            return TErr(f"Too many different prefixes ({set_of_prefixes}).")

        return Ok(None)
class StringParenStripper(StringTransformer):
    """StringTransformer that strips surrounding parentheses from strings.

    Requirements:
        The line contains a string which is surrounded by parentheses and:
            - The target string is NOT the only argument to a function call.
            - The target string is NOT a "pointless" string.
            - If the target string contains a PERCENT, the brackets are not
              preceded or followed by an operator with higher precedence than
              PERCENT.

    Transformations:
        The parentheses mentioned in the 'Requirements' section are stripped.

    Collaborations:
        StringParenStripper has its own inherent usefulness, but it is also
        relied on to clean up the parentheses created by StringParenWrapper (in
        the event that they are no longer needed).
    """

    def do_match(self, line: Line) -> TMatchResult:
        """Return Ok(string_idx) for the first string leaf on @line that is
        wrapped in strippable parentheses (per the class docstring's
        'Requirements'), or Err(CannotTransform) if none exists."""
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        for idx, leaf in enumerate(LL):
            # Should be a string...
            if leaf.type != token.STRING:
                continue

            # If this is a "pointless" string (a bare expression statement)...
            if (
                leaf.parent
                and leaf.parent.parent
                and leaf.parent.parent.type == syms.simple_stmt
            ):
                continue

            # Should be preceded by a non-empty LPAR...
            if (
                not is_valid_index(idx - 1)
                or LL[idx - 1].type != token.LPAR
                or is_empty_lpar(LL[idx - 1])
            ):
                continue

            # That LPAR should NOT be preceded by a function name or a closing
            # bracket (which could be a function which returns a function or a
            # list/dictionary that contains a function)...
            if is_valid_index(idx - 2) and (
                LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
            ):
                continue

            string_idx = idx

            # Skip the string trailer, if one exists.
            string_parser = StringParser()
            next_idx = string_parser.parse(LL, string_idx)

            # if the leaves in the parsed string include a PERCENT, we need to
            # make sure the initial LPAR is NOT preceded by an operator with
            # higher or equal precedence to PERCENT
            if is_valid_index(idx - 2):
                # mypy can't quite follow unless we name this
                before_lpar = LL[idx - 2]
                if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
                    (
                        before_lpar.type
                        in {
                            token.STAR,
                            token.AT,
                            token.SLASH,
                            token.DOUBLESLASH,
                            token.PERCENT,
                            token.TILDE,
                            token.DOUBLESTAR,
                            token.AWAIT,
                            token.LSQB,
                            token.LPAR,
                        }
                    )
                    or (
                        # only unary PLUS/MINUS
                        before_lpar.parent
                        and before_lpar.parent.type == syms.factor
                        and (before_lpar.type in {token.PLUS, token.MINUS})
                    )
                ):
                    continue

            # Should be followed by a non-empty RPAR...
            if (
                is_valid_index(next_idx)
                and LL[next_idx].type == token.RPAR
                and not is_empty_rpar(LL[next_idx])
            ):
                # That RPAR should NOT be followed by anything with higher
                # precedence than PERCENT
                if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
                    token.DOUBLESTAR,
                    token.LSQB,
                    token.LPAR,
                    token.DOT,
                }:
                    continue

                return Ok(string_idx)

        return TErr("This line has no strings wrapped in parens.")

    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """Yield a clone of @line with the parentheses around the string at
        @string_idx removed (both from the new line and, via remove()/
        replace_child(), from the underlying node tree)."""
        LL = line.leaves

        string_parser = StringParser()
        rpar_idx = string_parser.parse(LL, string_idx)

        # Refuse to strip if either paren carries trailing comments: removing
        # the leaf would orphan them.
        for leaf in (LL[string_idx - 1], LL[rpar_idx]):
            if line.comments_after(leaf):
                yield TErr(
                    "Will not strip parentheses which have comments attached to them."
                )
                return

        new_line = line.clone()
        new_line.comments = line.comments.copy()
        try:
            append_leaves(new_line, line, LL[: string_idx - 1])
        except BracketMatchError:
            # HACK: I believe there is currently a bug somewhere in
            # right_hand_split() that is causing brackets to not be tracked
            # properly by a shared BracketTracker.
            append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)

        string_leaf = Leaf(token.STRING, LL[string_idx].value)
        LL[string_idx - 1].remove()
        replace_child(LL[string_idx], string_leaf)
        new_line.append(string_leaf)

        # Everything between the string and the RPAR, plus everything after
        # the RPAR, is carried over unchanged.
        append_leaves(
            new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
        )

        LL[rpar_idx].remove()

        yield Ok(new_line)
class BaseStringSplitter(StringTransformer):
    """
    Abstract class for StringTransformers which transform a Line's strings by splitting
    them or placing them on their own lines where necessary to avoid going over
    the configured line length.

    Requirements:
        * The target string value is responsible for the line going over the
          line length limit. It follows that after all of black's other line
          split methods have been exhausted, this line (or one of the resulting
          lines after all line splits are performed) would still be over the
          line_length limit unless we split this string.
          AND

        * The target string is NOT a "pointless" string (i.e. a string that has
          no parent or siblings).
          AND

        * The target string is not followed by an inline comment that appears
          to be a pragma.
          AND

        * The target string is not a multiline (i.e. triple-quote) string.
    """

    # Token types that may legally prefix a target string on its line
    # (comparisons, '%', '+', '*').
    STRING_OPERATORS: Final = [
        token.EQEQUAL,
        token.GREATER,
        token.GREATEREQUAL,
        token.LESS,
        token.LESSEQUAL,
        token.NOTEQUAL,
        token.PERCENT,
        token.PLUS,
        token.STAR,
    ]

    @abstractmethod
    def do_splitter_match(self, line: Line) -> TMatchResult:
        """
        BaseStringSplitter asks its clients to override this method instead of
        `StringTransformer.do_match(...)`.

        Follows the same protocol as `StringTransformer.do_match(...)`.

        Refer to `help(StringTransformer.do_match)` for more information.
        """

    def do_match(self, line: Line) -> TMatchResult:
        """Delegate matching to the subclass hook, then apply the shared
        validation checks; any Err short-circuits the match."""
        match_result = self.do_splitter_match(line)
        if isinstance(match_result, Err):
            return match_result

        string_idx = match_result.ok()
        vresult = self._validate(line, string_idx)
        if isinstance(vresult, Err):
            return vresult

        return match_result

    def _validate(self, line: Line, string_idx: int) -> TResult[None]:
        """
        Checks that @line meets all of the requirements listed in this classes'
        docstring. Refer to `help(BaseStringSplitter)` for a detailed
        description of those requirements.

        Returns:
            * Ok(None), if ALL of the requirements are met.
                OR
            * Err(CannotTransform), if ANY of the requirements are NOT met.
        """
        LL = line.leaves

        string_leaf = LL[string_idx]

        max_string_length = self._get_max_string_length(line, string_idx)
        if len(string_leaf.value) <= max_string_length:
            return TErr(
                "The string itself is not what is causing this line to be too long."
            )

        if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
            token.STRING,
            token.NEWLINE,
        ]:
            return TErr(
                f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
                " no parent)."
            )

        if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
            line.comments[id(line.leaves[string_idx])]
        ):
            return TErr(
                "Line appears to end with an inline pragma comment. Splitting the line"
                " could modify the pragma's behavior."
            )

        if has_triple_quotes(string_leaf.value):
            return TErr("We cannot split multiline strings.")

        return Ok(None)

    def _get_max_string_length(self, line: Line, string_idx: int) -> int:
        """
        Calculates the max string length used when attempting to determine
        whether or not the target string is responsible for causing the line to
        go over the line length limit.

        WARNING: This method is tightly coupled to both StringSplitter and
        (especially) StringParenWrapper. There is probably a better way to
        accomplish what is being done here.

        Returns:
            max_string_length: such that `line.leaves[string_idx].value >
            max_string_length` implies that the target string IS responsible
            for causing this line to exceed the line length limit.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        # We use the shorthand "WMA4" in comments to abbreviate "We must
        # account for". When giving examples, we use STRING to mean some/any
        # valid string.
        #
        # Finally, we use the following convenience variables:
        #
        #   P: The leaf that is before the target string leaf.
        #   N: The leaf that is after the target string leaf.
        #   NN: The leaf that is after N.

        # WMA4 the whitespace at the beginning of the line.
        offset = line.depth * 4

        if is_valid_index(string_idx - 1):
            p_idx = string_idx - 1
            if (
                LL[string_idx - 1].type == token.LPAR
                and LL[string_idx - 1].value == ""
                and string_idx >= 2
            ):
                # If the previous leaf is an empty LPAR placeholder, we should skip it.
                p_idx -= 1

            P = LL[p_idx]
            if P.type in self.STRING_OPERATORS:
                # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
                offset += len(str(P)) + 1

            if P.type == token.COMMA:
                # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
                offset += 3

            if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
                # This conditional branch is meant to handle dictionary keys,
                # variable assignments, 'return STRING' statement lines, and
                # 'else STRING' ternary expression lines.

                # WMA4 a single space.
                offset += 1

                # WMA4 the lengths of any leaves that came before that space,
                # but after any closing bracket before that space.
                for leaf in reversed(LL[: p_idx + 1]):
                    offset += len(str(leaf))
                    if leaf.type in CLOSING_BRACKETS:
                        break

        if is_valid_index(string_idx + 1):
            N = LL[string_idx + 1]
            if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
                # If the next leaf is an empty RPAR placeholder, we should skip it.
                N = LL[string_idx + 2]

            if N.type == token.COMMA:
                # WMA4 a single comma at the end of the string (e.g `STRING,`).
                offset += 1

            if is_valid_index(string_idx + 2):
                NN = LL[string_idx + 2]

                if N.type == token.DOT and NN.type == token.NAME:
                    # This conditional branch is meant to handle method calls invoked
                    # off of a string literal up to and including the LPAR character.

                    # WMA4 the '.' character.
                    offset += 1

                    if (
                        is_valid_index(string_idx + 3)
                        and LL[string_idx + 3].type == token.LPAR
                    ):
                        # WMA4 the left parenthesis character.
                        offset += 1

                    # WMA4 the length of the method's name.
                    offset += len(NN.value)

        has_comments = False
        for comment_leaf in line.comments_after(LL[string_idx]):
            if not has_comments:
                has_comments = True
                # WMA4 two spaces before the '#' character.
                offset += 2

            # WMA4 the length of the inline comment.
            offset += len(comment_leaf.value)

        max_string_length = self.line_length - offset
        return max_string_length

    @staticmethod
    def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the "prefer paren wrap" statement
            requirements listed in the 'Requirements' section of the StringParenWrapper
            class's docstring.
                OR
            None, otherwise.
        """
        # The line must start with a string.
        if LL[0].type != token.STRING:
            return None

        # If the string is surrounded by commas (or is the first/last child)...
        prev_sibling = LL[0].prev_sibling
        next_sibling = LL[0].next_sibling
        if not prev_sibling and not next_sibling and parent_type(LL[0]) == syms.atom:
            # If it's an atom string, we need to check the parent atom's siblings.
            parent = LL[0].parent
            assert parent is not None  # For type checkers.
            prev_sibling = parent.prev_sibling
            next_sibling = parent.next_sibling
        if (not prev_sibling or prev_sibling.type == token.COMMA) and (
            not next_sibling or next_sibling.type == token.COMMA
        ):
            return 0

        return None
def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
    """
    Yields spans corresponding to expressions in a given f-string.
    Spans are half-open ranges (left inclusive, right exclusive).
    Assumes the input string is a valid f-string, but will not crash if the input
    string is invalid.
    """
    open_braces: List[int] = []  # indices of currently-unmatched "{"
    pos = 0
    length = len(s)
    while pos < length:
        ch = s[pos]
        if ch == "{":
            # In the literal part of the f-string, "{{" is an escaped brace,
            # not the start of an expression.
            if not open_braces and pos + 1 < length and s[pos + 1] == "{":
                pos += 2
            else:
                open_braces.append(pos)
                pos += 1
        elif ch == "}":
            if open_braces:
                start = open_braces.pop()
                # Closing the outermost brace completes an expression span.
                if not open_braces:
                    yield (start, pos + 1)
            pos += 1
        elif open_braces:
            # Inside an expression: fast-forward through string literals, as
            # any braces they contain are not delimiters. Note that backslashes
            # are not legal in the expression portion of f-strings.
            quote = None
            if s[pos : pos + 3] in ("'''", '"""'):
                quote = s[pos : pos + 3]
            elif ch in ("'", '"'):
                quote = ch
            if quote:
                pos += len(quote)
                while pos < length and s[pos : pos + len(quote)] != quote:
                    pos += 1
                pos += len(quote)
            else:
                pos += 1
        else:
            pos += 1
def fstring_contains_expr(s: str) -> bool:
    """Return True iff *s* contains at least one f-expression span."""
    for _span in iter_fexpr_spans(s):
        return True
    return False
class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
    """
    StringTransformer that splits "atom" strings (i.e. strings which exist on
    lines by themselves).

    Requirements:
        * The line consists ONLY of a single string (possibly prefixed by a
          string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
          a trailing comma.
          AND
        * All of the requirements listed in BaseStringSplitter's docstring.

    Transformations:
        The string mentioned in the 'Requirements' section is split into as
        many substrings as necessary to adhere to the configured line length.

        In the final set of substrings, no substring should be smaller than
        MIN_SUBSTR_SIZE characters.

        The string will ONLY be split on spaces (i.e. each new substring should
        start with a space). Note that the string will NOT be split on a space
        which is escaped with a backslash.

        If the string is an f-string, it will NOT be split in the middle of an
        f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
        else bar()} is an f-expression).

        If the string that is being split has an associated set of custom split
        records and those custom splits will NOT result in any line going over
        the configured line length, those custom splits are used. Otherwise the
        string is split as late as possible (from left-to-right) while still
        adhering to the transformation rules listed above.

    Collaborations:
        StringSplitter relies on StringMerger to construct the appropriate
        CustomSplit objects and add them to the custom split map.
    """

    # Minimum number of characters allowed in any resulting substring.
    MIN_SUBSTR_SIZE: Final = 6

    def do_splitter_match(self, line: Line) -> TMatchResult:
        """Match a line consisting solely of one (possibly operator-prefixed)
        string, an optional string trailer, and an optional trailing comma
        (see the class docstring's 'Requirements')."""
        LL = line.leaves

        if self._prefer_paren_wrap_match(LL) is not None:
            return TErr("Line needs to be wrapped in parens first.")

        is_valid_index = is_valid_index_factory(LL)

        idx = 0

        # The first two leaves MAY be the 'not in' keywords...
        if (
            is_valid_index(idx)
            and is_valid_index(idx + 1)
            and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
            and str(LL[idx]) + str(LL[idx + 1]) == "not in"
        ):
            idx += 2
        # Else the first leaf MAY be a string operator symbol or the 'in' keyword...
        elif is_valid_index(idx) and (
            LL[idx].type in self.STRING_OPERATORS
            or LL[idx].type == token.NAME
            and str(LL[idx]) == "in"
        ):
            idx += 1

        # The next/first leaf MAY be an empty LPAR...
        if is_valid_index(idx) and is_empty_lpar(LL[idx]):
            idx += 1

        # The next/first leaf MUST be a string...
        if not is_valid_index(idx) or LL[idx].type != token.STRING:
            return TErr("Line does not start with a string.")

        string_idx = idx

        # Skip the string trailer, if one exists.
        string_parser = StringParser()
        idx = string_parser.parse(LL, string_idx)

        # That string MAY be followed by an empty RPAR...
        if is_valid_index(idx) and is_empty_rpar(LL[idx]):
            idx += 1

        # That string / empty RPAR leaf MAY be followed by a comma...
        if is_valid_index(idx) and LL[idx].type == token.COMMA:
            idx += 1

        # But no more leaves are allowed...
        if is_valid_index(idx):
            return TErr("This line does not end with a string.")

        return Ok(string_idx)

    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """Split the string at @string_idx into substrings, yielding one new
        Line per substring; the final yielded line carries the remainder and
        any leaves that followed the original string."""
        LL = line.leaves

        QUOTE = LL[string_idx].value[-1]

        is_valid_index = is_valid_index_factory(LL)
        insert_str_child = insert_str_child_factory(LL[string_idx])

        prefix = get_string_prefix(LL[string_idx].value).lower()

        # We MAY choose to drop the 'f' prefix from substrings that don't
        # contain any f-expressions, but ONLY if the original f-string
        # contains at least one f-expression. Otherwise, we will alter the AST
        # of the program.
        drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
            LL[string_idx].value
        )

        first_string_line = True

        string_op_leaves = self._get_string_operator_leaves(LL)
        string_op_leaves_length = (
            sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1
            if string_op_leaves
            else 0
        )

        def maybe_append_string_operators(new_line: Line) -> None:
            """
            Side Effects:
                If @line starts with a string operator and this is the first
                line we are constructing, this function appends the string
                operator to @new_line and replaces the old string operator leaf
                in the node structure. Otherwise this function does nothing.
            """
            maybe_prefix_leaves = string_op_leaves if first_string_line else []
            for i, prefix_leaf in enumerate(maybe_prefix_leaves):
                replace_child(LL[i], prefix_leaf)
                new_line.append(prefix_leaf)

        ends_with_comma = (
            is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
        )

        def max_last_string() -> int:
            """
            Returns:
                The max allowed length of the string value used for the last
                line we will construct.
            """
            result = self.line_length
            result -= line.depth * 4
            result -= 1 if ends_with_comma else 0
            result -= string_op_leaves_length
            return result

        # --- Calculate Max Break Index (for string value)
        # We start with the line length limit
        max_break_idx = self.line_length
        # The last index of a string of length N is N-1.
        max_break_idx -= 1
        # Leading whitespace is not present in the string value (e.g. Leaf.value).
        max_break_idx -= line.depth * 4
        if max_break_idx < 0:
            yield TErr(
                f"Unable to split {LL[string_idx].value} at such high of a line depth:"
                f" {line.depth}"
            )
            return

        # Check if StringMerger registered any custom splits.
        custom_splits = self.pop_custom_splits(LL[string_idx].value)
        # We use them ONLY if none of them would produce lines that exceed the
        # line limit.
        use_custom_breakpoints = bool(
            custom_splits
            and all(csplit.break_idx <= max_break_idx for csplit in custom_splits)
        )

        # Temporary storage for the remaining chunk of the string line that
        # can't fit onto the line currently being constructed.
        rest_value = LL[string_idx].value

        def more_splits_should_be_made() -> bool:
            """
            Returns:
                True iff `rest_value` (the remaining string value from the last
                split), should be split again.
            """
            if use_custom_breakpoints:
                return len(custom_splits) > 1
            else:
                return len(rest_value) > max_last_string()

        string_line_results: List[Ok[Line]] = []
        while more_splits_should_be_made():
            if use_custom_breakpoints:
                # Custom User Split (manual)
                csplit = custom_splits.pop(0)
                break_idx = csplit.break_idx
            else:
                # Algorithmic Split (automatic)
                max_bidx = max_break_idx - string_op_leaves_length
                maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
                if maybe_break_idx is None:
                    # If we are unable to algorithmically determine a good split
                    # and this string has custom splits registered to it, we
                    # fall back to using them--which means we have to start
                    # over from the beginning.
                    if custom_splits:
                        rest_value = LL[string_idx].value
                        string_line_results = []
                        first_string_line = True
                        use_custom_breakpoints = True
                        continue

                    # Otherwise, we stop splitting here.
                    break

                break_idx = maybe_break_idx

            # --- Construct `next_value`
            next_value = rest_value[:break_idx] + QUOTE

            # HACK: The following 'if' statement is a hack to fix the custom
            # breakpoint index in the case of either: (a) substrings that were
            # f-strings but will have the 'f' prefix removed OR (b) substrings
            # that were not f-strings but will now become f-strings because of
            # redundant use of the 'f' prefix (i.e. none of the substrings
            # contain f-expressions but one or more of them had the 'f' prefix
            # anyway; in which case, we will prepend 'f' to _all_ substrings).
            #
            # There is probably a better way to accomplish what is being done
            # here...
            #
            # If this substring is an f-string, we _could_ remove the 'f'
            # prefix, and the current custom split did NOT originally use a
            # prefix...
            if (
                next_value != self._normalize_f_string(next_value, prefix)
                and use_custom_breakpoints
                and not csplit.has_prefix
            ):
                # Then `csplit.break_idx` will be off by one after removing
                # the 'f' prefix.
                break_idx += 1
                next_value = rest_value[:break_idx] + QUOTE

            if drop_pointless_f_prefix:
                next_value = self._normalize_f_string(next_value, prefix)

            # --- Construct `next_leaf`
            next_leaf = Leaf(token.STRING, next_value)
            insert_str_child(next_leaf)
            self._maybe_normalize_string_quotes(next_leaf)

            # --- Construct `next_line`
            next_line = line.clone()
            maybe_append_string_operators(next_line)
            next_line.append(next_leaf)
            string_line_results.append(Ok(next_line))

            rest_value = prefix + QUOTE + rest_value[break_idx:]
            first_string_line = False

        yield from string_line_results

        if drop_pointless_f_prefix:
            rest_value = self._normalize_f_string(rest_value, prefix)

        rest_leaf = Leaf(token.STRING, rest_value)
        insert_str_child(rest_leaf)

        # NOTE: I could not find a test case that verifies that the following
        # line is actually necessary, but it seems to be. Otherwise we risk
        # not normalizing the last substring, right?
        self._maybe_normalize_string_quotes(rest_leaf)

        last_line = line.clone()
        maybe_append_string_operators(last_line)

        # If there are any leaves to the right of the target string...
        if is_valid_index(string_idx + 1):
            # We use `temp_value` here to determine how long the last line
            # would be if we were to append all the leaves to the right of the
            # target string to the last string line.
            temp_value = rest_value
            for leaf in LL[string_idx + 1 :]:
                temp_value += str(leaf)
                if leaf.type == token.LPAR:
                    break

            # Try to fit them all on the same line with the last substring...
            if (
                len(temp_value) <= max_last_string()
                or LL[string_idx + 1].type == token.COMMA
            ):
                last_line.append(rest_leaf)
                append_leaves(last_line, line, LL[string_idx + 1 :])
                yield Ok(last_line)
            # Otherwise, place the last substring on one line and everything
            # else on a line below that...
            else:
                last_line.append(rest_leaf)
                yield Ok(last_line)

                non_string_line = line.clone()
                append_leaves(non_string_line, line, LL[string_idx + 1 :])
                yield Ok(non_string_line)
        # Else the target string was the last leaf...
        else:
            last_line.append(rest_leaf)
            last_line.comments = line.comments.copy()
            yield Ok(last_line)

    def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
        """
        Yields:
            All ranges of @string which, if @string were to be split there,
            would result in the splitting of an \\N{...} expression (which is NOT
            allowed).
        """
        # True - the previous backslash was unescaped
        # False - the previous backslash was escaped *or* there was no backslash
        previous_was_unescaped_backslash = False
        it = iter(enumerate(string))
        for idx, c in it:
            if c == "\\":
                previous_was_unescaped_backslash = not previous_was_unescaped_backslash
                continue
            if not previous_was_unescaped_backslash or c != "N":
                previous_was_unescaped_backslash = False
                continue
            previous_was_unescaped_backslash = False

            begin = idx - 1  # the position of backslash before \N{...}
            for idx, c in it:
                if c == "}":
                    end = idx
                    break
            else:
                # malformed nameescape expression?
                # should have been detected by AST parsing earlier...
                raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
            yield begin, end

    def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
        """
        Yields:
            All ranges of @string which, if @string were to be split there,
            would result in the splitting of an f-expression (which is NOT
            allowed).
        """
        # Only f-strings have f-expressions; everything else is safe to split.
        if "f" not in get_string_prefix(string).lower():
            return
        yield from iter_fexpr_spans(string)

    def _get_illegal_split_indices(self, string: str) -> Set[Index]:
        """Return the union of all indices of @string at which a split must NOT
        occur (those inside f-expressions or \\N{...} escape sequences)."""
        illegal_indices: Set[Index] = set()
        iterators = [
            self._iter_fexpr_slices(string),
            self._iter_nameescape_slices(string),
        ]
        for it in iterators:
            for begin, end in it:
                illegal_indices.update(range(begin, end + 1))
        return illegal_indices

    def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
        """
        This method contains the algorithm that StringSplitter uses to
        determine which character to split each string at.

        Args:
            @string: The substring that we are attempting to split.
            @max_break_idx: The ideal break index. We will return this value if it
            meets all the necessary conditions. In the likely event that it
            doesn't we will try to find the closest index BELOW @max_break_idx
            that does. If that fails, we will expand our search by also
            considering all valid indices ABOVE @max_break_idx.

        Pre-Conditions:
            * assert_is_leaf_string(@string)
            * 0 <= @max_break_idx < len(@string)

        Returns:
            break_idx, if an index is able to be found that meets all of the
            conditions listed in the 'Transformations' section of this classes'
            docstring.
                OR
            None, otherwise.
        """
        is_valid_index = is_valid_index_factory(string)

        assert is_valid_index(max_break_idx)
        assert_is_leaf_string(string)

        _illegal_split_indices = self._get_illegal_split_indices(string)

        def breaks_unsplittable_expression(i: Index) -> bool:
            """
            Returns:
                True iff returning @i would result in the splitting of an
                unsplittable expression (which is NOT allowed).
            """
            return i in _illegal_split_indices

        def passes_all_checks(i: Index) -> bool:
            """
            Returns:
                True iff ALL of the conditions listed in the 'Transformations'
                section of this classes' docstring would be met by returning @i.
            """
            is_space = string[i] == " "

            # Count the run of backslashes immediately before index i: an odd
            # count means the character at i is escaped.
            is_not_escaped = True
            j = i - 1
            while is_valid_index(j) and string[j] == "\\":
                is_not_escaped = not is_not_escaped
                j -= 1

            is_big_enough = (
                len(string[i:]) >= self.MIN_SUBSTR_SIZE
                and len(string[:i]) >= self.MIN_SUBSTR_SIZE
            )
            return (
                is_space
                and is_not_escaped
                and is_big_enough
                and not breaks_unsplittable_expression(i)
            )

        # First, we check all indices BELOW @max_break_idx.
        break_idx = max_break_idx
        while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
            break_idx -= 1

        if not passes_all_checks(break_idx):
            # If that fails, we check all indices ABOVE @max_break_idx.
            #
            # If we are able to find a valid index here, the next line is going
            # to be longer than the specified line length, but it's probably
            # better than doing nothing at all.
            break_idx = max_break_idx + 1
            while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
                break_idx += 1

            if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
                return None

        return break_idx

    def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
        """Normalize @leaf's quotes in place, but only if quote normalization
        is enabled on this transformer."""
        if self.normalize_strings:
            leaf.value = normalize_string_quotes(leaf.value)

    def _normalize_f_string(self, string: str, prefix: str) -> str:
        """
        Pre-Conditions:
            * assert_is_leaf_string(@string)

        Returns:
            * If @string is an f-string that contains no f-expressions, we
            return a string identical to @string except that the 'f' prefix
            has been stripped and all double braces (i.e. '{{' or '}}') have
            been normalized (i.e. turned into '{' or '}').
                OR
            * Otherwise, we return @string.
        """
        assert_is_leaf_string(string)

        if "f" in prefix and not fstring_contains_expr(string):
            new_prefix = prefix.replace("f", "")

            temp = string[len(prefix) :]
            temp = re.sub(r"\{\{", "{", temp)
            temp = re.sub(r"\}\}", "}", temp)
            new_string = temp

            return f"{new_prefix}{new_string}"
        else:
            return string

    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
        """Return fresh (whitespace-stripped) copies of the leading run of
        string-operator / NAME leaves that precede the target string."""
        LL = list(leaves)

        string_op_leaves = []
        i = 0
        while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
            prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
            string_op_leaves.append(prefix_leaf)
            i += 1
        return string_op_leaves
class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
"""
StringTransformer that wraps strings in parens and then splits at the LPAR.
Requirements:
All of the requirements listed in BaseStringSplitter's docstring in
addition to the requirements listed below:
* The line is a return/yield statement, which returns/yields a string.
OR
* The line is part of a ternary expression (e.g. `x = y if cond else
z`) such that the line starts with `else <string>`, where <string> is
some string.
OR
* The line is an assert statement, which ends with a string.
OR
* The line is an assignment statement (e.g. `x = <string>` or `x +=
<string>`) such that the variable is being assigned the value of some
string.
OR
* The line is a dictionary key assignment where some valid key is being
assigned the value of some string.
OR
* The line starts with an "atom" string that prefers to be wrapped in
parens. It's preferred to be wrapped when the string is surrounded by
commas (or is the first/last child).
Transformations:
The chosen string is wrapped in parentheses and then split at the LPAR.
We then have one line which ends with an LPAR and another line that
starts with the chosen string. The latter line is then split again at
the RPAR. This results in the RPAR (and possibly a trailing comma)
being placed on its own line.
NOTE: If any leaves exist to the right of the chosen string (except
for a trailing comma, which would be placed after the RPAR), those
leaves are placed inside the parentheses. In effect, the chosen
string is not necessarily being "wrapped" by parentheses. We can,
however, count on the LPAR being placed directly before the chosen
string.
In other words, StringParenWrapper creates "atom" strings. These
can then be split again by StringSplitter, if necessary.
Collaborations:
In the event that a string line split by StringParenWrapper is
changed such that it no longer needs to be given its own line,
StringParenWrapper relies on StringParenStripper to clean up the
parentheses it created.
For "atom" strings that prefers to be wrapped in parens, it requires
StringSplitter to hold the split until the string is wrapped in parens.
"""
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if line.leaves[-1].type in OPENING_BRACKETS:
return TErr(
"Cannot wrap parens around a line that ends in an opening bracket."
)
string_idx = (
self._return_match(LL)
or self._else_match(LL)
or self._assert_match(LL)
or self._assign_match(LL)
or self._dict_match(LL)
or self._prefer_paren_wrap_match(LL)
)
if string_idx is not None:
string_value = line.leaves[string_idx].value
# If the string has no spaces...
if " " not in string_value:
# And will still violate the line length limit when split...
max_string_length = self.line_length - ((line.depth + 1) * 4)
if len(string_value) > max_string_length:
# And has no associated custom splits...
if not self.has_custom_splits(string_value):
# Then we should NOT put this string on its own line.
return TErr(
"We do not wrap long strings in parentheses when the"
" resultant line would still be over the specified line"
" length and can't be split further by StringSplitter."
)
return Ok(string_idx)
return TErr("This line does not contain any non-atomic strings.")
@staticmethod
def _return_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the return/yield statement
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is apart of a return/yield statement and the first leaf
# contains either the "return" or "yield" keywords...
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
0
].value in ["return", "yield"]:
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _else_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the ternary expression
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is apart of a ternary expression and the first leaf
# contains the "else" keyword...
if (
parent_type(LL[0]) == syms.test
and LL[0].type == token.NAME
and LL[0].value == "else"
):
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
    @staticmethod
    def _assert_match(LL: List[Leaf]) -> Optional[int]:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the assert statement
            requirements listed in the 'Requirements' section of this classes'
            docstring.
            OR
            None, otherwise.
        """
        # If this line is apart of an assert statement and the first leaf
        # contains the "assert" keyword...
        if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
            is_valid_index = is_valid_index_factory(LL)
            for i, leaf in enumerate(LL):
                # We MUST find a comma...
                if leaf.type == token.COMMA:
                    # (`i + 2` skips over an invisible empty paren pair.)
                    # NOTE(review): LL[i + 1] assumes the comma is never the
                    # final leaf of the line -- TODO confirm with callers.
                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
                    # That comma MUST be followed by a string...
                    if is_valid_index(idx) and LL[idx].type == token.STRING:
                        string_idx = idx
                        # Skip the string trailer, if one exists.
                        string_parser = StringParser()
                        idx = string_parser.parse(LL, string_idx)
                        # But no more leaves are allowed...
                        if not is_valid_index(idx):
                            return string_idx
        return None
    @staticmethod
    def _assign_match(LL: List[Leaf]) -> Optional[int]:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the assignment statement
            requirements listed in the 'Requirements' section of this classes'
            docstring.
            OR
            None, otherwise.
        """
        # If this line is apart of an expression statement or is a function
        # argument AND the first leaf contains a variable name...
        if (
            parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
            and LL[0].type == token.NAME
        ):
            is_valid_index = is_valid_index_factory(LL)
            for i, leaf in enumerate(LL):
                # We MUST find either an '=' or '+=' symbol...
                if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
                    # (`i + 2` skips over an invisible empty paren pair.)
                    # NOTE(review): LL[i + 1] assumes '='/'+=' is never the
                    # final leaf of the line -- TODO confirm with callers.
                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
                    # That symbol MUST be followed by a string...
                    if is_valid_index(idx) and LL[idx].type == token.STRING:
                        string_idx = idx
                        # Skip the string trailer, if one exists.
                        string_parser = StringParser()
                        idx = string_parser.parse(LL, string_idx)
                        # The next leaf MAY be a comma iff this line is apart
                        # of a function argument...
                        if (
                            parent_type(LL[0]) == syms.argument
                            and is_valid_index(idx)
                            and LL[idx].type == token.COMMA
                        ):
                            idx += 1
                        # But no more leaves are allowed...
                        if not is_valid_index(idx):
                            return string_idx
        return None
    @staticmethod
    def _dict_match(LL: List[Leaf]) -> Optional[int]:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the dictionary key assignment
            statement requirements listed in the 'Requirements' section of this
            classes' docstring.
            OR
            None, otherwise.
        """
        # If this line is apart of a dictionary key assignment...
        if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
            is_valid_index = is_valid_index_factory(LL)
            for i, leaf in enumerate(LL):
                # We MUST find a colon...
                if leaf.type == token.COLON:
                    # (`i + 2` skips over an invisible empty paren pair.)
                    # NOTE(review): LL[i + 1] assumes the colon is never the
                    # final leaf of the line -- TODO confirm with callers.
                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
                    # That colon MUST be followed by a string...
                    if is_valid_index(idx) and LL[idx].type == token.STRING:
                        string_idx = idx
                        # Skip the string trailer, if one exists.
                        string_parser = StringParser()
                        idx = string_parser.parse(LL, string_idx)
                        # That string MAY be followed by a comma...
                        if is_valid_index(idx) and LL[idx].type == token.COMMA:
                            idx += 1
                        # But no more leaves are allowed...
                        if not is_valid_index(idx):
                            return string_idx
        return None
    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """Wrap the target string in parentheses, yielding three lines.

        Yields, in order: the leaves to the left of the string followed by a
        new LPAR; the string itself on its own (indented) line; and a new RPAR
        followed by any trailing comma.  Pre-existing (possibly invisible)
        parentheses around the string are replaced rather than duplicated.
        """
        LL = line.leaves
        is_valid_index = is_valid_index_factory(LL)
        # Orphan the string leaf now; subsequent inserts fill its old slot.
        insert_str_child = insert_str_child_factory(LL[string_idx])
        # Index of the (possible) trailing comma: always the last leaf.
        comma_idx = -1
        ends_with_comma = False
        if LL[comma_idx].type == token.COMMA:
            ends_with_comma = True
        leaves_to_steal_comments_from = [LL[string_idx]]
        if ends_with_comma:
            leaves_to_steal_comments_from.append(LL[comma_idx])
        # --- First Line
        first_line = line.clone()
        left_leaves = LL[:string_idx]
        # We have to remember to account for (possibly invisible) LPAR and RPAR
        # leaves that already wrapped the target string. If these leaves do
        # exist, we will replace them with our own LPAR and RPAR leaves.
        old_parens_exist = False
        if left_leaves and left_leaves[-1].type == token.LPAR:
            old_parens_exist = True
            leaves_to_steal_comments_from.append(left_leaves[-1])
            left_leaves.pop()
        append_leaves(first_line, line, left_leaves)
        lpar_leaf = Leaf(token.LPAR, "(")
        if old_parens_exist:
            replace_child(LL[string_idx - 1], lpar_leaf)
        else:
            insert_str_child(lpar_leaf)
        first_line.append(lpar_leaf)
        # We throw inline comments that were originally to the right of the
        # target string to the top line. They will now be shown to the right of
        # the LPAR.
        for leaf in leaves_to_steal_comments_from:
            for comment_leaf in line.comments_after(leaf):
                first_line.append(comment_leaf, preformatted=True)
        yield Ok(first_line)
        # --- Middle (String) Line
        # We only need to yield one (possibly too long) string line, since the
        # `StringSplitter` will break it down further if necessary.
        string_value = LL[string_idx].value
        string_line = Line(
            mode=line.mode,
            depth=line.depth + 1,
            inside_brackets=True,
            should_split_rhs=line.should_split_rhs,
            magic_trailing_comma=line.magic_trailing_comma,
        )
        string_leaf = Leaf(token.STRING, string_value)
        insert_str_child(string_leaf)
        string_line.append(string_leaf)
        old_rpar_leaf = None
        if is_valid_index(string_idx + 1):
            right_leaves = LL[string_idx + 1 :]
            if ends_with_comma:
                # The trailing comma is re-emitted on the last line instead.
                right_leaves.pop()
            if old_parens_exist:
                assert right_leaves and right_leaves[-1].type == token.RPAR, (
                    "Apparently, old parentheses do NOT exist?!"
                    f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
                )
                old_rpar_leaf = right_leaves.pop()
            append_leaves(string_line, line, right_leaves)
        yield Ok(string_line)
        # --- Last Line
        last_line = line.clone()
        # The first line's tracker saw the new LPAR; reuse it so the RPAR
        # below is tracked as closing that LPAR.
        last_line.bracket_tracker = first_line.bracket_tracker
        new_rpar_leaf = Leaf(token.RPAR, ")")
        if old_rpar_leaf is not None:
            replace_child(old_rpar_leaf, new_rpar_leaf)
        else:
            insert_str_child(new_rpar_leaf)
        last_line.append(new_rpar_leaf)
        # If the target string ended with a comma, we place this comma to the
        # right of the RPAR on the last line.
        if ends_with_comma:
            comma_leaf = Leaf(token.COMMA, ",")
            replace_child(LL[comma_idx], comma_leaf)
            last_line.append(comma_leaf)
        yield Ok(last_line)
class StringParser:
    """
    A state machine that aids in parsing a string's "trailer", which can be
    either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
    (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
    varY)`).
    NOTE: A new StringParser object MUST be instantiated for each string
    trailer we need to parse.
    Examples:
        We shall assume that `line` equals the `Line` object that corresponds
        to the following line of python code:
        ```
        x = "Some {}.".format("String") + some_other_string
        ```
        Furthermore, we will assume that `string_idx` is some index such that:
        ```
        assert line.leaves[string_idx].value == "Some {}."
        ```
        The following code snippet then holds:
        ```
        string_parser = StringParser()
        idx = string_parser.parse(line.leaves, string_idx)
        assert line.leaves[idx].type == token.PLUS
        ```
    """
    # Wildcard key used in `_goto` meaning "any token type not listed
    # explicitly for this state". (The value is arbitrary; it only has to
    # differ from every real token type.)
    DEFAULT_TOKEN: Final = 20210605
    # String Parser States
    START: Final = 1
    DOT: Final = 2
    NAME: Final = 3
    PERCENT: Final = 4
    SINGLE_FMT_ARG: Final = 5
    LPAR: Final = 6
    RPAR: Final = 7
    DONE: Final = 8
    # Lookup Table for Next State
    _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
        # A string trailer may start with '.' OR '%'.
        (START, token.DOT): DOT,
        (START, token.PERCENT): PERCENT,
        (START, DEFAULT_TOKEN): DONE,
        # A '.' MUST be followed by an attribute or method name.
        (DOT, token.NAME): NAME,
        # A method name MUST be followed by an '(', whereas an attribute name
        # is the last symbol in the string trailer.
        (NAME, token.LPAR): LPAR,
        (NAME, DEFAULT_TOKEN): DONE,
        # A '%' symbol can be followed by an '(' or a single argument (e.g. a
        # string or variable name).
        (PERCENT, token.LPAR): LPAR,
        (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
        # If a '%' symbol is followed by a single argument, that argument is
        # the last leaf in the string trailer.
        (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
        # If present, a ')' symbol is the last symbol in a string trailer.
        # (NOTE: LPARS and nested RPARS are not included in this lookup table,
        # since they are treated as a special case by the parsing logic in this
        # classes' implementation.)
        (RPAR, DEFAULT_TOKEN): DONE,
    }
    def __init__(self) -> None:
        # Current state of the machine; advanced by `_next_state`.
        self._state = self.START
        # Count of '('s seen in the trailer without a matching ')'.
        self._unmatched_lpars = 0
    def parse(self, leaves: List[Leaf], string_idx: int) -> int:
        """
        Pre-conditions:
            * @leaves[@string_idx].type == token.STRING
        Returns:
            The index directly after the last leaf which is apart of the string
            trailer, if a "trailer" exists.
            OR
            @string_idx + 1, if no string "trailer" exists.
        """
        assert leaves[string_idx].type == token.STRING
        idx = string_idx + 1
        # Feed leaves to the state machine until one falls outside the trailer.
        while idx < len(leaves) and self._next_state(leaves[idx]):
            idx += 1
        return idx
    def _next_state(self, leaf: Leaf) -> bool:
        """
        Pre-conditions:
            * On the first call to this function, @leaf MUST be the leaf that
            was directly after the string leaf in question (e.g. if our target
            string is `line.leaves[i]` then the first call to this method must
            be `line.leaves[i + 1]`).
            * On the next call to this function, the leaf parameter passed in
            MUST be the leaf directly following @leaf.
        Returns:
            True iff @leaf is apart of the string's trailer.
        """
        # We ignore empty LPAR or RPAR leaves.
        if is_empty_par(leaf):
            return True
        next_token = leaf.type
        if next_token == token.LPAR:
            self._unmatched_lpars += 1
        current_state = self._state
        # The LPAR parser state is a special case. We will return True until we
        # find the matching RPAR token.
        if current_state == self.LPAR:
            if next_token == token.RPAR:
                self._unmatched_lpars -= 1
                if self._unmatched_lpars == 0:
                    self._state = self.RPAR
        # Otherwise, we use a lookup table to determine the next state.
        else:
            # If the lookup table matches the current state to the next
            # token, we use the lookup table.
            if (current_state, next_token) in self._goto:
                self._state = self._goto[current_state, next_token]
            else:
                # Otherwise, we check if the current state was assigned a
                # default.
                if (current_state, self.DEFAULT_TOKEN) in self._goto:
                    self._state = self._goto[current_state, self.DEFAULT_TOKEN]
                # If no default has been assigned, then this parser has a logic
                # error.
                else:
                    raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
        if self._state == self.DONE:
            return False
        return True
def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
    """Detach @string_leaf and return a function that refills its old slot.

    @string_leaf is orphaned (removed from its parent) immediately.  The
    returned function inserts each leaf/node it is given into that parent at
    the position @string_leaf used to occupy; successive calls append the new
    children one after another.

    Example:
        Given `x = "foo"`, orphaning the `"foo"` leaf and then inserting
        LPAR, `"bar"`, and RPAR children (in that order) turns the node into
        `x = ("bar")`, with `string_leaf.parent is None` afterwards.
    """
    parent = string_leaf.parent
    insert_at = string_leaf.remove()

    def insert_str_child(child: LN) -> None:
        # Each insertion advances the slot so children stay in call order.
        nonlocal insert_at
        assert parent is not None
        assert insert_at is not None
        parent.insert_child(insert_at, child)
        insert_at += 1

    return insert_str_child
def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
    """Build a predicate that reports whether an index is usable for @seq.

    The returned function accepts only non-negative, in-bounds indices;
    negative indices (which Python itself would wrap around) are rejected.

    Examples:
        ```
        my_list = [1, 2, 3]
        is_valid_index = is_valid_index_factory(my_list)
        assert is_valid_index(0)
        assert is_valid_index(2)
        assert not is_valid_index(3)
        assert not is_valid_index(-1)
        ```
    """

    def is_valid_index(idx: int) -> bool:
        """Return True iff @idx is non-negative and within bounds of @seq."""
        # range membership is an O(1) arithmetic check for ints.
        return idx in range(len(seq))

    return is_valid_index
| mit | 76e65fd2d3a40c08b399aa00b8a25a88 | 36.265066 | 88 | 0.560953 | 4.256055 | false | false | false | false |
psf/black | tests/data/simple_cases/comments2.py | 2 | 7638 | from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component # DRY
)
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Any',
'Callable',
'ClassVar',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
# Concrete collection types.
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'Generator',
]
not_shareables = [
# singletons
True,
False,
NotImplemented, ...,
# builtin types and objects
type,
object,
object(),
Exception(),
42,
100.0,
"spam",
# user-defined types and objects
Cheese,
Cheese("Wensleydale"),
SubBytes(b"spam"),
]
if 'PYTHON' in os.environ:
add_compiler(compiler_from_env())
else:
# for compiler in compilers.values():
# add_compiler(compiler)
add_compiler(compilers[(7.0, 32)])
# add_compiler(compilers[(7.1, 64)])
# Comment before function.
def inline_comments_in_brackets_ruin_everything():
if typedargslist:
parameters.children = [
children[0], # (1
body,
children[-1] # )1
]
parameters.children = [
children[0],
body,
children[-1], # type: ignore
]
else:
parameters.children = [
parameters.children[0], # (2 what if this was actually long
body,
parameters.children[-1], # )2
]
parameters.children = [parameters.what_if_this_was_actually_long.children[0], body, parameters.children[-1]] # type: ignore
if (self._proc is not None
# has the child process finished?
and self._returncode is None
# the child process has finished, but the
# transport hasn't been notified yet?
and self._proc.poll() is None):
pass
# no newline before or after
short = [
# one
1,
# two
2]
# no newline after
call(arg1, arg2, """
short
""", arg3=True)
############################################################################
call2(
#short
arg1,
#but
arg2,
#multiline
"""
short
""",
# yup
arg3=True)
lcomp = [
element # yup
for element in collection # yup
if element is not None # right
]
lcomp2 = [
# hello
element
# yup
for element in collection
# right
if element is not None
]
lcomp3 = [
# This one is actually too long to fit in a single line.
element.split('\n', 1)[0]
# yup
for element in collection.select_elements()
# right
if element is not None
]
while True:
if False:
continue
# and round and round we go
# and round and round we go
# let's return
return Node(
syms.simple_stmt,
[
Node(statement, result),
Leaf(token.NEWLINE, '\n') # FIXME: \r\n?
],
)
CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final
class Test:
def _init_host(self, parsed) -> None:
if (parsed.hostname is None or # type: ignore
not parsed.hostname.strip()):
pass
#######################
### SECTION COMMENT ###
#######################
instruction()#comment with bad spacing
# END COMMENTS
# MORE END COMMENTS
# output
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
"Any",
"Callable",
"ClassVar",
# ABCs (from collections.abc).
"AbstractSet", # collections.abc.Set.
"ByteString",
"Container",
# Concrete collection types.
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple", # Not really a type.
"Generator",
]
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
42,
100.0,
"spam",
# user-defined types and objects
Cheese,
Cheese("Wensleydale"),
SubBytes(b"spam"),
]
if "PYTHON" in os.environ:
add_compiler(compiler_from_env())
else:
# for compiler in compilers.values():
# add_compiler(compiler)
add_compiler(compilers[(7.0, 32)])
# add_compiler(compilers[(7.1, 64)])
# Comment before function.
def inline_comments_in_brackets_ruin_everything():
if typedargslist:
parameters.children = [children[0], body, children[-1]] # (1 # )1
parameters.children = [
children[0],
body,
children[-1], # type: ignore
]
else:
parameters.children = [
parameters.children[0], # (2 what if this was actually long
body,
parameters.children[-1], # )2
]
parameters.children = [parameters.what_if_this_was_actually_long.children[0], body, parameters.children[-1]] # type: ignore
if (
self._proc is not None
# has the child process finished?
and self._returncode is None
# the child process has finished, but the
# transport hasn't been notified yet?
and self._proc.poll() is None
):
pass
# no newline before or after
short = [
# one
1,
# two
2,
]
# no newline after
call(
arg1,
arg2,
"""
short
""",
arg3=True,
)
############################################################################
call2(
# short
arg1,
# but
arg2,
# multiline
"""
short
""",
# yup
arg3=True,
)
lcomp = [
element for element in collection if element is not None # yup # yup # right
]
lcomp2 = [
# hello
element
# yup
for element in collection
# right
if element is not None
]
lcomp3 = [
# This one is actually too long to fit in a single line.
element.split("\n", 1)[0]
# yup
for element in collection.select_elements()
# right
if element is not None
]
while True:
if False:
continue
# and round and round we go
# and round and round we go
# let's return
return Node(
syms.simple_stmt,
[Node(statement, result), Leaf(token.NEWLINE, "\n")], # FIXME: \r\n?
)
CONFIG_FILES = (
[
CONFIG_FILE,
]
+ SHARED_CONFIG_FILES
+ USER_CONFIG_FILES
) # type: Final
class Test:
def _init_host(self, parsed) -> None:
if parsed.hostname is None or not parsed.hostname.strip(): # type: ignore
pass
#######################
### SECTION COMMENT ###
#######################
instruction() # comment with bad spacing
# END COMMENTS
# MORE END COMMENTS
| mit | 0c5daadfd40b9c3c6351c08ebdbbf16a | 21.333333 | 132 | 0.539146 | 3.822823 | false | false | false | false |
psf/black | tests/data/preview/comments9.py | 1 | 4268 | # Test for https://github.com/psf/black/issues/246.
some = statement
# This comment should be split from the statement above by two lines.
def function():
pass
some = statement
# This multiline comments section
# should be split from the statement
# above by two lines.
def function():
pass
some = statement
# This comment should be split from the statement above by two lines.
async def async_function():
pass
some = statement
# This comment should be split from the statement above by two lines.
class MyClass:
pass
some = statement
# This should be stick to the statement above
# This should be split from the above by two lines
class MyClassWithComplexLeadingComments:
pass
class ClassWithDocstring:
"""A docstring."""
# Leading comment after a class with just a docstring
class MyClassAfterAnotherClassWithDocstring:
pass
some = statement
# leading 1
@deco1
# leading 2
# leading 2 extra
@deco2(with_args=True)
# leading 3
@deco3
# leading 4
def decorated():
pass
some = statement
# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading 3 that already has an empty line
@deco3
# leading 4
def decorated_with_split_leading_comments():
pass
some = statement
# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading 3
@deco3
# leading 4 that already has an empty line
def decorated_with_split_leading_comments():
pass
def main():
if a:
# Leading comment before inline function
def inline():
pass
# Another leading comment
def another_inline():
pass
else:
# More leading comments
def inline_after_else():
pass
if a:
# Leading comment before "top-level inline" function
def top_level_quote_inline():
pass
# Another leading comment
def another_top_level_quote_inline_inline():
pass
else:
# More leading comments
def top_level_quote_inline_after_else():
pass
class MyClass:
# First method has no empty lines between bare class def.
# More comments.
def first_method(self):
pass
# output
# Test for https://github.com/psf/black/issues/246.
some = statement
# This comment should be split from the statement above by two lines.
def function():
pass
some = statement
# This multiline comments section
# should be split from the statement
# above by two lines.
def function():
pass
some = statement
# This comment should be split from the statement above by two lines.
async def async_function():
pass
some = statement
# This comment should be split from the statement above by two lines.
class MyClass:
pass
some = statement
# This should be stick to the statement above
# This should be split from the above by two lines
class MyClassWithComplexLeadingComments:
pass
class ClassWithDocstring:
"""A docstring."""
# Leading comment after a class with just a docstring
class MyClassAfterAnotherClassWithDocstring:
pass
some = statement
# leading 1
@deco1
# leading 2
# leading 2 extra
@deco2(with_args=True)
# leading 3
@deco3
# leading 4
def decorated():
pass
some = statement
# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading 3 that already has an empty line
@deco3
# leading 4
def decorated_with_split_leading_comments():
pass
some = statement
# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading 3
@deco3
# leading 4 that already has an empty line
def decorated_with_split_leading_comments():
pass
def main():
if a:
# Leading comment before inline function
def inline():
pass
# Another leading comment
def another_inline():
pass
else:
# More leading comments
def inline_after_else():
pass
if a:
# Leading comment before "top-level inline" function
def top_level_quote_inline():
pass
# Another leading comment
def another_top_level_quote_inline_inline():
pass
else:
# More leading comments
def top_level_quote_inline_after_else():
pass
class MyClass:
# First method has no empty lines between bare class def.
# More comments.
def first_method(self):
pass
| mit | 969d99f5c0189b21ed8e159c736c3556 | 15.80315 | 69 | 0.679944 | 3.790409 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/csa_pmj.py | 1 | 4498 | #!/usr/bin/env python
# -*- coding: utf-8
# Functions to get distance from PMJ for processing segmentation data
# Author: Sandrine Bédard
import logging
import numpy as np
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.centerline.core import get_centerline
logger = logging.getLogger(__name__)
NEAR_ZERO_THRESHOLD = 1e-6
def get_slices_for_pmj_distance(segmentation, pmj, distance, extent, param_centerline=None, verbose=1):
    """
    Interpolate centerline with pontomedullary junction (PMJ) label and compute distance from PMJ along the centerline.
    Generate a mask from segmentation of the slices used to process segmentation data corresponding to a distance from PMJ.

    :param segmentation: input segmentation. Could be either an Image or a file name.
    :param pmj: label of PMJ. Could be either an Image or a file name.
    :param distance: float: Distance from PMJ in mm.
    :param extent: float: Extent of the coverage mask in mm.
    :param param_centerline: see centerline.core.ParamCenterline()
    :param verbose: int: verbosity level.
    :return im_ctl: Image: extrapolated centerline, in the input's native orientation.
    :return mask: Image: mask of the slices to process, in the input's native orientation.
    :return slices: str: "zmin:zmax" slice range (RPI z indices).
    :return arr_ctl: centerline coordinates as returned by get_centerline().
    :raises RuntimeError: if segmentation and pmj are not in the same space.
    :raises ValueError: if the requested distance is out of bounds or not covered by the segmentation.
    """
    # Capture the caller's orientation BEFORE converting to RPI, so the
    # outputs can be returned in the native orientation. (Previously the
    # orientation was read after conversion, so it was always 'RPI' and the
    # final re-orientation calls were no-ops.)
    im_seg = Image(segmentation)
    native_orientation = im_seg.orientation
    im_seg.change_orientation('RPI')
    im_pmj = Image(pmj).change_orientation('RPI')
    if not im_seg.data.shape == im_pmj.data.shape:
        raise RuntimeError("segmentation and pmj should be in the same space coordinate.")

    # Add PMJ label to the segmentation and then extrapolate to obtain a Centerline object defined between the PMJ
    # and the lower end of the centerline.
    im_seg_with_pmj = im_seg.copy()
    im_seg_with_pmj.data = im_seg_with_pmj.data + im_pmj.data

    # Get max and min z index of the segmentation with pmj
    _, _, Z = (im_seg_with_pmj.data > NEAR_ZERO_THRESHOLD).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)

    from spinalcordtoolbox.straightening import _get_centerline
    # NOTE(review): param_centerline=None (the default) would raise
    # AttributeError below; callers appear to always pass one -- TODO confirm.
    # Linear interpolation (vs. bspline) ensures strong robustness towards defective segmentations at the top slices.
    param_centerline.algo_fitting = 'linear'
    # On top of the linear interpolation we add some smoothing to remove discontinuities.
    param_centerline.smooth = 50
    param_centerline.minmax = True
    # Compute spinalcordtoolbox.types.Centerline class
    ctl_seg_with_pmj = _get_centerline(im_seg_with_pmj, param_centerline, verbose=verbose)
    # Also get the image centerline (because it is a required output)
    # TODO: merge _get_centerline into get_centerline
    im_ctl_seg_with_pmj, arr_ctl, _, _ = get_centerline(im_seg_with_pmj, param_centerline, verbose=verbose)

    # Compute the incremental distance from the PMJ along each point in the centerline
    length_from_pmj = np.asarray(ctl_seg_with_pmj.incremental_length_inverse[::-1])

    # From this incremental distance, find the indices corresponding to the requested distance +/- extent/2 from the
    # PMJ. Only z indices covered by the segmentation are candidates, since the centerline only includes those slices.
    z_ref = np.arange(int(min_z_index), int(max_z_index) + 1)
    zmin = z_ref[np.argmin(np.abs(length_from_pmj - distance - extent / 2))]
    zmax = z_ref[np.argmin(np.abs(length_from_pmj - distance + extent / 2))]

    # Check if distance is out of bounds
    if distance > length_from_pmj[0]:
        raise ValueError("Input distance of " + str(distance) + " mm is out of bounds for maximum distance of " + str(length_from_pmj[0]) + " mm")
    if distance < length_from_pmj[-1]:
        raise ValueError("Input distance of " + str(distance) + " mm is out of bounds for minimum distance of " + str(length_from_pmj[-1]) + " mm")

    # Check if the range of selected slices is covered by the segmentation
    if not all(np.any(im_seg.data[:, :, z]) for z in range(zmin, zmax)):
        raise ValueError(f"The requested distances from the PMJ are not fully covered by the segmentation.\n"
                         f"The range of slices are: [{zmin}, {zmax}]")

    # Create mask from segmentation centered on distance from PMJ and with extent length on z axis.
    mask = im_seg.copy()
    mask.data[:, :, 0:zmin] = 0
    mask.data[:, :, zmax:] = 0
    mask.change_orientation(native_orientation)

    # Get corresponding slices
    slices = "{}:{}".format(zmin, zmax - 1)  # -1 since the last slice is included to compute CSA after.
    return im_ctl_seg_with_pmj.change_orientation(native_orientation), mask, slices, arr_ctl
| mit | 96457b5e241621968f8514a672d8e5f5 | 51.290698 | 147 | 0.706916 | 3.549329 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.